diff --git a/dbhub.go b/dbhub.go
index d9e5b31..b3b911e 100644
--- a/dbhub.go
+++ b/dbhub.go
@@ -2,9 +2,12 @@ package dbhub
 
 import (
 	"encoding/base64"
+	"encoding/json"
 	"fmt"
 	"net/http"
 	"net/url"
+
+	com "github.com/sqlitebrowser/dbhub.io/common"
 )
 
 // New creates a new DBHub.io connection object. It doesn't connect to DBHub.io to do this.
@@ -17,7 +20,7 @@ func New(key string) (Connection, error) {
 }
 
 // Query runs a SQL query (SELECT only) on the chosen database, returning the results
-func (c Connection) Query(dbowner, dbname, sql string) (Results, error) {
+func (c Connection) Query(dbowner, dbname, sql string) (out Results, err error) {
 	// Prepare the API parameters
 	data := url.Values{}
 	data.Set("apikey", c.APIKey)
@@ -26,22 +29,48 @@ func (c Connection) Query(dbowner, dbname, sql string) (Results, error) {
 	data.Set("sql", base64.StdEncoding.EncodeToString([]byte(sql)))
 
 	// Run the query on the remote database
-	res, err := http.PostForm(c.Server+"/v1/query", data)
+	resp, err := http.PostForm(c.Server+"/v1/query", data)
 	if err != nil {
 		return Results{}, err
 	}
-	if res.StatusCode < 200 || res.StatusCode >= 300 {
+	defer resp.Body.Close()
+
+	// Basic error handling, depending on the status code received from the server
+	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
 		// The returned status code indicates something went wrong
-		return Results{}, fmt.Errorf(res.Status)
-	}
-	if res.StatusCode == 200 {
-		// The query ran successfully, so prepare and return the results
-		// TODO: TBD
-		fmt.Printf("Results: %v\n", res)
+		return Results{}, fmt.Errorf("%s", resp.Status)
 	}
-
-	// TODO: Figure out what should be returned here
-	return Results{}, nil
+	if resp.StatusCode != 200 {
+		// TODO: Figure out what should be returned for other 2** status messages
+		return
+	}
+
+	// The query ran successfully, so decode the response body into the common DataRow format
+	var returnedData []com.DataRow
+	if err = json.NewDecoder(resp.Body).Decode(&returnedData); err != nil {
+		return Results{}, err
+	}
+
+	// Construct the result list
+	for _, j := range returnedData {
+
+		// Construct a single row
+		var oneRow ResultRow
+		for _, l := range j {
+			// Float, integer, and text fields are added to the output
+			switch l.Type {
+			case com.Float, com.Integer, com.Text:
+				oneRow.Fields = append(oneRow.Fields, fmt.Sprint(l.Value))
+			default:
+				// All other value types are just output as an empty string (for now)
+				oneRow.Fields = append(oneRow.Fields, "")
+			}
+		}
+		// Add the row to the output list
+		out.Rows = append(out.Rows, oneRow)
+	}
+	return
}
 
 // ChangeServer changes the address all Queries will be sent to. Useful for testing and development.
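For context, here is roughly how the reworked `Query()` is meant to be driven end to end, given the `New()` constructor above and the `Results`/`ResultRow` shapes from the `types.go` hunk below. This is a minimal sketch, not part of the change itself: the owner, database name, and SQL are placeholder values, and `com.DataRow` is assumed (from the switch on `l.Type`/`l.Value`) to be a slice of typed values defined in `dbhub.io/common`.

```go
package main

import (
	"fmt"
	"log"

	dbhub "github.com/sqlitebrowser/go-dbhub"
)

func main() {
	// Create the connection object; this doesn't contact DBHub.io yet
	db, err := dbhub.New("YOUR_API_KEY")
	if err != nil {
		log.Fatal(err)
	}

	// Run a read-only query against a remote database (placeholder names)
	results, err := db.Query("some-owner", "somedb.sqlite",
		"SELECT name, value FROM table1 LIMIT 5")
	if err != nil {
		log.Fatal(err)
	}

	// With the types.go change, every row comes back as a flat []string
	for _, row := range results.Rows {
		fmt.Println(row.Fields)
	}
}
```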
diff --git a/go.mod b/go.mod index 412a9a3..f4d5918 100644 --- a/go.mod +++ b/go.mod @@ -1,3 +1,16 @@ module github.com/sqlitebrowser/go-dbhub go 1.14 + +replace ( + github.com/Sirupsen/logrus v1.0.5 => github.com/sirupsen/logrus v1.0.5 + github.com/Sirupsen/logrus v1.3.0 => github.com/sirupsen/logrus v1.3.0 + github.com/Sirupsen/logrus v1.4.0 => github.com/sirupsen/logrus v1.4.0 + github.com/Sirupsen/logrus v1.5.0 => github.com/sirupsen/logrus v1.5.0 + github.com/Sirupsen/logrus v1.6.0 => github.com/sirupsen/logrus v1.6.0 +) + +require ( + github.com/Sirupsen/logrus v1.6.0 // indirect + github.com/sqlitebrowser/dbhub.io v0.0.5 +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..f06a6b9 --- /dev/null +++ b/go.sum @@ -0,0 +1,144 @@ +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668 h1:U/lr3Dgy4WK+hNk4tyD+nuGjpVLPEHuJSFXMw11/HPA= +github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= +github.com/bradleypeabody/gorilla-sessions-memcache v0.0.0-20181103040241-659414f458e1/go.mod h1:dkChI7Tbtx7H1Tj7TqGSZMOeGpMP5gLHtjroHd4agiI= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-ini/ini v1.46.0 h1:hDJFfs/9f75875scvqLkhNB5Jz5/DybKEOZ5MLF+ng4= +github.com/go-ini/ini v1.46.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-playground/locales v0.12.1 h1:2FITxuFt/xuCNP1Acdhv62OzaCiviiE4kotfhkmOqEc= +github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= +github.com/go-playground/universal-translator v0.16.0 h1:X++omBR/4cE2MNg91AoC3rmGrCjJ8eAeUP/K/EKx4DM= +github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.0/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/gwenn/gosqlite v0.0.0-20190222165041-a2186711fe00 h1:igSDr1Z88p1XnjnjHbPaMzlx5yizLbQlR1pieGeh8/Y= +github.com/gwenn/gosqlite 
v0.0.0-20190222165041-a2186711fe00/go.mod h1:sRUzBwp0ASIStD2bQ7Iz+ty+IGPNY183sTv1YZ4hZFA= +github.com/gwenn/yacr v0.0.0-20190406104508-cfb564bd6947 h1:usc769ShRCkyzvYTmQ3HQVZVF+15XQqzQeDVWpBsGxM= +github.com/gwenn/yacr v0.0.0-20190406104508-cfb564bd6947/go.mod h1:5SNcBGxZ5OaJAMJCSI/x3V7SGsvXqbwnwP/sHZLgYsw= +github.com/hectane/go-attest v0.1.2 h1:HzAy7PhILWGy3Vr6WsTFG4k+8csZ7rYsgB9zO3HD+RE= +github.com/hectane/go-attest v0.1.2/go.mod h1:gxESRl/EHIwBfuD9xidptRiaabPkeyoDYGkIkQoOlm4= +github.com/hectane/go-nonblockingchan v0.1.0 h1:w5dFzLYim23KoK64xqfA0iSMNMA8ruLXvGkyXlZBDFY= +github.com/hectane/go-nonblockingchan v0.1.0/go.mod h1:Ztuu6NIB+3zEHbsCEXcynf5a4B49/PofiBiQUGDGbRw= +github.com/hectane/hectane v0.3.1 h1:S1aNSxLulYiKqGdYM+ROPcv8TGFmy85TZcTgzibeTz8= +github.com/hectane/hectane v0.3.1/go.mod h1:enmgBsfbzuxwQ9OLZeWYMWJt27wcegpNZORlhfw77p0= +github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc= +github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= +github.com/jackc/pgx v2.11.0+incompatible h1:IgFLUrzrhJj8mxbK44ZYExGVnjtfV4+TOkerb/XERV8= +github.com/jackc/pgx v2.11.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kennygrant/sanitize v1.2.4 h1:gN25/otpP5vAsO2djbMhF/LQX6R7+O1TB4yv8NzpJ3o= +github.com/kennygrant/sanitize v1.2.4/go.mod h1:LGsjYYtgxbetdg5owWB2mpgUL6e2nfw2eObZ0u0qvak= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/leodido/go-urn v1.1.0 h1:Sm1gr51B1kKyfD2BlRcLSiEkffoG96g6TPv6eRoEiB8= +github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= +github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9 h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/microcosm-cc/bluemonday v1.0.2 h1:5lPfLTTAvAbtS0VqT+94yOtFnGfUWYyx0+iToC3Os3s= +github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= +github.com/minio/minio-go v6.0.14+incompatible h1:fnV+GD28LeqdN6vT2XdGKW8Qe/IfjJDswNVuni6km9o= +github.com/minio/minio-go v6.0.14+incompatible/go.mod h1:7guKYtitv8dktvNUGrhzmNlA5wrAABTQXCoesZdFQO8= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= 
+github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday v2.0.0+incompatible h1:cBXrhZNUf9C+La9/YpS+UHpUT8YD6Td9ZMSU9APFcsk= +github.com/russross/blackfriday v2.0.0+incompatible/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470 h1:qb9IthCFBmROJ6YBS31BEMeSYjOscSiG+EO+JVNTz64= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20190704215121-7189cc372560 h1:SpaoQDTgpo2YZkvmr2mtgloFFfPTjtLMlZkQtNAPQik= +github.com/shurcooL/go v0.0.0-20190704215121-7189cc372560/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041 h1:llrF3Fs4018ePo4+G/HV/uQUqEI1HMDjCeOf2V6puPc= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/highlight_diff v0.0.0-20181222201841-111da2e7d480 h1:KaKXZldeYH73dpQL+Nr38j1r5BgpAYQjYvENOUpIZDQ= +github.com/shurcooL/highlight_diff v0.0.0-20181222201841-111da2e7d480/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181215221002-9d8641ddf2e1 h1:a6a6gGfBoO2ty+yyHNd7M6gkp37EwE3GIoycUnLo1Oo= +github.com/shurcooL/highlight_go v0.0.0-20181215221002-9d8641ddf2e1/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/octicon v0.0.0-20181222203144-9ff1a4cf27f4 h1:H0v7bJx9CDGHx402wE08Fk5AS2mWdTYK9JI5vyrx8jQ= +github.com/shurcooL/octicon v0.0.0-20181222203144-9ff1a4cf27f4/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8= +github.com/smartystreets/goconvey 
v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d h1:yKm7XZV6j9Ev6lojP2XaIshpT4ymkqhMeSghO5Ps00E= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e h1:qpG93cPwA5f7s/ZPBJnGOYQNK/vKsaDaseuKT5Asee8= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/sqlitebrowser/blackfriday v9.0.0+incompatible h1:ddH/UyzasooYgGIblVU4R8DdmBuJ7QXLvSqX/0chZv4= +github.com/sqlitebrowser/blackfriday v9.0.0+incompatible/go.mod h1:/zga9sqpWzcewuI83AO5JZwe9+6F9GgPDdqqdNNEL/0= +github.com/sqlitebrowser/dbhub.io v0.0.5 h1:vCfYJuu7ExeVQk9MMmG5i0o8fW1LyG5pnrwdQu0nve4= +github.com/sqlitebrowser/dbhub.io v0.0.5/go.mod h1:rYH5dKjuzeAG8kslsxwqbTFga8TxxQ/ntB86NbnB+4E= +github.com/sqlitebrowser/github_flavored_markdown v0.0.0-20190120045821-b8cf8f054e47 h1:s0+Ea95n1LrsKh6rtclU/9Qb2/5ofvnfnR7gDDiFTw8= +github.com/sqlitebrowser/github_flavored_markdown v0.0.0-20190120045821-b8cf8f054e47/go.mod h1:8vPIKi5FslxCXEgfQxrFtWfdclGy6VWAc9NA1ZTYCJg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text 
v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v9 v9.29.1 h1:SvGtYmN60a5CVKTOzMSyfzWDeZRxRuGvRQyEAKbw1xc= +gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec h1:RlWgLqCMMIYYEVcAR5MDsuHlVkaIPDAF+5Dehzg8L5A= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= +gopkg.in/ini.v1 v1.46.0 h1:VeDZbLYGaupuvIrsYCEOe/L/2Pcs5n7hdO1ZTjporag= +gopkg.in/ini.v1 v1.46.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= diff --git a/types.go b/types.go index 54d9853..3921fd0 100644 --- a/types.go +++ b/types.go @@ -6,7 +6,7 @@ type Connection struct { } type ResultRow struct { - Field []interface{} + Fields []string } type Results struct { diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore new file mode 100644 index 0000000..0cd3800 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/.gitignore @@ -0,0 +1,5 @@ +TAGS +tags +.*.swp +tomlcheck/tomlcheck +toml.test diff --git a/vendor/github.com/BurntSushi/toml/.travis.yml b/vendor/github.com/BurntSushi/toml/.travis.yml new file mode 100644 index 0000000..8b8afc4 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/.travis.yml @@ -0,0 +1,15 @@ +language: go +go: + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - tip +install: + - go install ./... + - go get github.com/BurntSushi/toml-test +script: + - export PATH="$PATH:$HOME/gopath/bin" + - make test diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE new file mode 100644 index 0000000..6efcfd0 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COMPATIBLE @@ -0,0 +1,3 @@ +Compatible with TOML version +[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md) + diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING new file mode 100644 index 0000000..01b5743 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/BurntSushi/toml/Makefile b/vendor/github.com/BurntSushi/toml/Makefile new file mode 100644 index 0000000..3600848 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/Makefile @@ -0,0 +1,19 @@ +install: + go install ./... + +test: install + go test -v + toml-test toml-test-decoder + toml-test -encoder toml-test-encoder + +fmt: + gofmt -w *.go */*.go + colcheck *.go */*.go + +tags: + find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS + +push: + git push origin master + git push github master + diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md new file mode 100644 index 0000000..7c1b37e --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -0,0 +1,218 @@ +## TOML parser and encoder for Go with reflection + +TOML stands for Tom's Obvious, Minimal Language. This Go package provides a +reflection interface similar to Go's standard library `json` and `xml` +packages. This package also supports the `encoding.TextUnmarshaler` and +`encoding.TextMarshaler` interfaces so that you can define custom data +representations. (There is an example of this below.) + +Spec: https://github.com/toml-lang/toml + +Compatible with TOML version +[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md) + +Documentation: https://godoc.org/github.com/BurntSushi/toml + +Installation: + +```bash +go get github.com/BurntSushi/toml +``` + +Try the toml validator: + +```bash +go get github.com/BurntSushi/toml/cmd/tomlv +tomlv some-toml-file.toml +``` + +[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml) + +### Testing + +This package passes all tests in +[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder +and the encoder. + +### Examples + +This package works similarly to how the Go standard library handles `XML` +and `JSON`. Namely, data is loaded into Go values via reflection. 
+ +For the simplest example, consider some TOML file as just a list of keys +and values: + +```toml +Age = 25 +Cats = [ "Cauchy", "Plato" ] +Pi = 3.14 +Perfection = [ 6, 28, 496, 8128 ] +DOB = 1987-07-05T05:45:00Z +``` + +Which could be defined in Go as: + +```go +type Config struct { + Age int + Cats []string + Pi float64 + Perfection []int + DOB time.Time // requires `import time` +} +``` + +And then decoded with: + +```go +var conf Config +if _, err := toml.Decode(tomlData, &conf); err != nil { + // handle error +} +``` + +You can also use struct tags if your struct field name doesn't map to a TOML +key value directly: + +```toml +some_key_NAME = "wat" +``` + +```go +type TOML struct { + ObscureKey string `toml:"some_key_NAME"` +} +``` + +### Using the `encoding.TextUnmarshaler` interface + +Here's an example that automatically parses duration strings into +`time.Duration` values: + +```toml +[[song]] +name = "Thunder Road" +duration = "4m49s" + +[[song]] +name = "Stairway to Heaven" +duration = "8m03s" +``` + +Which can be decoded with: + +```go +type song struct { + Name string + Duration duration +} +type songs struct { + Song []song +} +var favorites songs +if _, err := toml.Decode(blob, &favorites); err != nil { + log.Fatal(err) +} + +for _, s := range favorites.Song { + fmt.Printf("%s (%s)\n", s.Name, s.Duration) +} +``` + +And you'll also need a `duration` type that satisfies the +`encoding.TextUnmarshaler` interface: + +```go +type duration struct { + time.Duration +} + +func (d *duration) UnmarshalText(text []byte) error { + var err error + d.Duration, err = time.ParseDuration(string(text)) + return err +} +``` + +### More complex usage + +Here's an example of how to load the example from the official spec page: + +```toml +# This is a TOML document. Boom. + +title = "TOML Example" + +[owner] +name = "Tom Preston-Werner" +organization = "GitHub" +bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." +dob = 1979-05-27T07:32:00Z # First class dates? Why not? + +[database] +server = "192.168.1.1" +ports = [ 8001, 8001, 8002 ] +connection_max = 5000 +enabled = true + +[servers] + + # You can indent as you please. Tabs or spaces. TOML don't care. + [servers.alpha] + ip = "10.0.0.1" + dc = "eqdc10" + + [servers.beta] + ip = "10.0.0.2" + dc = "eqdc10" + +[clients] +data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it + +# Line breaks are OK when inside arrays +hosts = [ + "alpha", + "omega" +] +``` + +And the corresponding Go types are: + +```go +type tomlConfig struct { + Title string + Owner ownerInfo + DB database `toml:"database"` + Servers map[string]server + Clients clients +} + +type ownerInfo struct { + Name string + Org string `toml:"organization"` + Bio string + DOB time.Time +} + +type database struct { + Server string + Ports []int + ConnMax int `toml:"connection_max"` + Enabled bool +} + +type server struct { + IP string + DC string +} + +type clients struct { + Data [][]interface{} + Hosts []string +} +``` + +Note that a case insensitive match will be tried if an exact match can't be +found. + +A working example of the above can be found in `_examples/example.{go,toml}`. 
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go new file mode 100644 index 0000000..b0fd51d --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -0,0 +1,509 @@ +package toml + +import ( + "fmt" + "io" + "io/ioutil" + "math" + "reflect" + "strings" + "time" +) + +func e(format string, args ...interface{}) error { + return fmt.Errorf("toml: "+format, args...) +} + +// Unmarshaler is the interface implemented by objects that can unmarshal a +// TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(interface{}) error +} + +// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. +func Unmarshal(p []byte, v interface{}) error { + _, err := Decode(string(p), v) + return err +} + +// Primitive is a TOML value that hasn't been decoded into a Go value. +// When using the various `Decode*` functions, the type `Primitive` may +// be given to any value, and its decoding will be delayed. +// +// A `Primitive` value can be decoded using the `PrimitiveDecode` function. +// +// The underlying representation of a `Primitive` value is subject to change. +// Do not rely on it. +// +// N.B. Primitive values are still parsed, so using them will only avoid +// the overhead of reflection. They can be useful when you don't know the +// exact type of TOML data until run time. +type Primitive struct { + undecoded interface{} + context Key +} + +// DEPRECATED! +// +// Use MetaData.PrimitiveDecode instead. +func PrimitiveDecode(primValue Primitive, v interface{}) error { + md := MetaData{decoded: make(map[string]bool)} + return md.unify(primValue.undecoded, rvalue(v)) +} + +// PrimitiveDecode is just like the other `Decode*` functions, except it +// decodes a TOML value that has already been parsed. Valid primitive values +// can *only* be obtained from values filled by the decoder functions, +// including this method. (i.e., `v` may contain more `Primitive` +// values.) +// +// Meta data for primitive values is included in the meta data returned by +// the `Decode*` functions with one exception: keys returned by the Undecoded +// method will only reflect keys that were decoded. Namely, any keys hidden +// behind a Primitive will be considered undecoded. Executing this method will +// update the undecoded keys in the meta data. (See the example.) +func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { + md.context = primValue.context + defer func() { md.context = nil }() + return md.unify(primValue.undecoded, rvalue(v)) +} + +// Decode will decode the contents of `data` in TOML format into a pointer +// `v`. +// +// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be +// used interchangeably.) +// +// TOML arrays of tables correspond to either a slice of structs or a slice +// of maps. +// +// TOML datetimes correspond to Go `time.Time` values. +// +// All other TOML types (float, string, int, bool and array) correspond +// to the obvious Go types. +// +// An exception to the above rules is if a type implements the +// encoding.TextUnmarshaler interface. In this case, any primitive TOML value +// (floats, strings, integers, booleans and datetimes) will be converted to +// a byte string and given to the value's UnmarshalText method. See the +// Unmarshaler example for a demonstration with time duration strings. +// +// Key mapping +// +// TOML keys can map to either keys in a Go map or field names in a Go +// struct. 
The special `toml` struct tag may be used to map TOML keys to +// struct fields that don't match the key name exactly. (See the example.) +// A case insensitive match to struct names will be tried if an exact match +// can't be found. +// +// The mapping between TOML values and Go values is loose. That is, there +// may exist TOML values that cannot be placed into your representation, and +// there may be parts of your representation that do not correspond to +// TOML values. This loose mapping can be made stricter by using the IsDefined +// and/or Undecoded methods on the MetaData returned. +// +// This decoder will not handle cyclic types. If a cyclic type is passed, +// `Decode` will not terminate. +func Decode(data string, v interface{}) (MetaData, error) { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v)) + } + if rv.IsNil() { + return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v)) + } + p, err := parse(data) + if err != nil { + return MetaData{}, err + } + md := MetaData{ + p.mapping, p.types, p.ordered, + make(map[string]bool, len(p.ordered)), nil, + } + return md, md.unify(p.mapping, indirect(rv)) +} + +// DecodeFile is just like Decode, except it will automatically read the +// contents of the file at `fpath` and decode it for you. +func DecodeFile(fpath string, v interface{}) (MetaData, error) { + bs, err := ioutil.ReadFile(fpath) + if err != nil { + return MetaData{}, err + } + return Decode(string(bs), v) +} + +// DecodeReader is just like Decode, except it will consume all bytes +// from the reader and decode it for you. +func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { + bs, err := ioutil.ReadAll(r) + if err != nil { + return MetaData{}, err + } + return Decode(string(bs), v) +} + +// unify performs a sort of type unification based on the structure of `rv`, +// which is the client representation. +// +// Any type mismatch produces an error. Finding a type that we don't know +// how to handle produces an unsupported type error. +func (md *MetaData) unify(data interface{}, rv reflect.Value) error { + + // Special case. Look for a `Primitive` value. + if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { + // Save the undecoded data and the key context into the primitive + // value. + context := make(Key, len(md.context)) + copy(context, md.context) + rv.Set(reflect.ValueOf(Primitive{ + undecoded: data, + context: context, + })) + return nil + } + + // Special case. Unmarshaler Interface support. + if rv.CanAddr() { + if v, ok := rv.Addr().Interface().(Unmarshaler); ok { + return v.UnmarshalTOML(data) + } + } + + // Special case. Handle time.Time values specifically. + // TODO: Remove this code when we decide to drop support for Go 1.1. + // This isn't necessary in Go 1.2 because time.Time satisfies the encoding + // interfaces. + if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) { + return md.unifyDatetime(data, rv) + } + + // Special case. Look for a value satisfying the TextUnmarshaler interface. + if v, ok := rv.Interface().(TextUnmarshaler); ok { + return md.unifyText(data, v) + } + // BUG(burntsushi) + // The behavior here is incorrect whenever a Go type satisfies the + // encoding.TextUnmarshaler interface but also corresponds to a TOML + // hash or array. In particular, the unmarshaler should only be applied + // to primitive TOML values. 
But at this point, it will be applied to + // all kinds of values and produce an incorrect error whenever those values + // are hashes or arrays (including arrays of tables). + + k := rv.Kind() + + // laziness + if k >= reflect.Int && k <= reflect.Uint64 { + return md.unifyInt(data, rv) + } + switch k { + case reflect.Ptr: + elem := reflect.New(rv.Type().Elem()) + err := md.unify(data, reflect.Indirect(elem)) + if err != nil { + return err + } + rv.Set(elem) + return nil + case reflect.Struct: + return md.unifyStruct(data, rv) + case reflect.Map: + return md.unifyMap(data, rv) + case reflect.Array: + return md.unifyArray(data, rv) + case reflect.Slice: + return md.unifySlice(data, rv) + case reflect.String: + return md.unifyString(data, rv) + case reflect.Bool: + return md.unifyBool(data, rv) + case reflect.Interface: + // we only support empty interfaces. + if rv.NumMethod() > 0 { + return e("unsupported type %s", rv.Type()) + } + return md.unifyAnything(data, rv) + case reflect.Float32: + fallthrough + case reflect.Float64: + return md.unifyFloat64(data, rv) + } + return e("unsupported type %s", rv.Kind()) +} + +func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if mapping == nil { + return nil + } + return e("type mismatch for %s: expected table but found %T", + rv.Type().String(), mapping) + } + + for key, datum := range tmap { + var f *field + fields := cachedTypeFields(rv.Type()) + for i := range fields { + ff := &fields[i] + if ff.name == key { + f = ff + break + } + if f == nil && strings.EqualFold(ff.name, key) { + f = ff + } + } + if f != nil { + subv := rv + for _, i := range f.index { + subv = indirect(subv.Field(i)) + } + if isUnifiable(subv) { + md.decoded[md.context.add(key).String()] = true + md.context = append(md.context, key) + if err := md.unify(datum, subv); err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + } else if f.name != "" { + // Bad user! No soup for you! 
+ return e("cannot write unexported field %s.%s", + rv.Type().String(), f.name) + } + } + } + return nil +} + +func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if tmap == nil { + return nil + } + return badtype("map", mapping) + } + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + for k, v := range tmap { + md.decoded[md.context.add(k).String()] = true + md.context = append(md.context, k) + + rvkey := indirect(reflect.New(rv.Type().Key())) + rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) + if err := md.unify(v, rvval); err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + + rvkey.SetString(k) + rv.SetMapIndex(rvkey, rvval) + } + return nil +} + +func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return badtype("slice", data) + } + sliceLen := datav.Len() + if sliceLen != rv.Len() { + return e("expected array length %d; got TOML array of length %d", + rv.Len(), sliceLen) + } + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return badtype("slice", data) + } + n := datav.Len() + if rv.IsNil() || rv.Cap() < n { + rv.Set(reflect.MakeSlice(rv.Type(), n, n)) + } + rv.SetLen(n) + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { + sliceLen := data.Len() + for i := 0; i < sliceLen; i++ { + v := data.Index(i).Interface() + sliceval := indirect(rv.Index(i)) + if err := md.unify(v, sliceval); err != nil { + return err + } + } + return nil +} + +func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error { + if _, ok := data.(time.Time); ok { + rv.Set(reflect.ValueOf(data)) + return nil + } + return badtype("time.Time", data) +} + +func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { + if s, ok := data.(string); ok { + rv.SetString(s) + return nil + } + return badtype("string", data) +} + +func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { + if num, ok := data.(float64); ok { + switch rv.Kind() { + case reflect.Float32: + fallthrough + case reflect.Float64: + rv.SetFloat(num) + default: + panic("bug") + } + return nil + } + return badtype("float", data) +} + +func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { + if num, ok := data.(int64); ok { + if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { + switch rv.Kind() { + case reflect.Int, reflect.Int64: + // No bounds checking necessary. + case reflect.Int8: + if num < math.MinInt8 || num > math.MaxInt8 { + return e("value %d is out of range for int8", num) + } + case reflect.Int16: + if num < math.MinInt16 || num > math.MaxInt16 { + return e("value %d is out of range for int16", num) + } + case reflect.Int32: + if num < math.MinInt32 || num > math.MaxInt32 { + return e("value %d is out of range for int32", num) + } + } + rv.SetInt(num) + } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { + unum := uint64(num) + switch rv.Kind() { + case reflect.Uint, reflect.Uint64: + // No bounds checking necessary. 
+ case reflect.Uint8: + if num < 0 || unum > math.MaxUint8 { + return e("value %d is out of range for uint8", num) + } + case reflect.Uint16: + if num < 0 || unum > math.MaxUint16 { + return e("value %d is out of range for uint16", num) + } + case reflect.Uint32: + if num < 0 || unum > math.MaxUint32 { + return e("value %d is out of range for uint32", num) + } + } + rv.SetUint(unum) + } else { + panic("unreachable") + } + return nil + } + return badtype("integer", data) +} + +func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { + if b, ok := data.(bool); ok { + rv.SetBool(b) + return nil + } + return badtype("boolean", data) +} + +func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { + rv.Set(reflect.ValueOf(data)) + return nil +} + +func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { + var s string + switch sdata := data.(type) { + case TextMarshaler: + text, err := sdata.MarshalText() + if err != nil { + return err + } + s = string(text) + case fmt.Stringer: + s = sdata.String() + case string: + s = sdata + case bool: + s = fmt.Sprintf("%v", sdata) + case int64: + s = fmt.Sprintf("%d", sdata) + case float64: + s = fmt.Sprintf("%f", sdata) + default: + return badtype("primitive (string-like)", data) + } + if err := v.UnmarshalText([]byte(s)); err != nil { + return err + } + return nil +} + +// rvalue returns a reflect.Value of `v`. All pointers are resolved. +func rvalue(v interface{}) reflect.Value { + return indirect(reflect.ValueOf(v)) +} + +// indirect returns the value pointed to by a pointer. +// Pointers are followed until the value is not a pointer. +// New values are allocated for each nil pointer. +// +// An exception to this rule is if the value satisfies an interface of +// interest to us (like encoding.TextUnmarshaler). +func indirect(v reflect.Value) reflect.Value { + if v.Kind() != reflect.Ptr { + if v.CanSet() { + pv := v.Addr() + if _, ok := pv.Interface().(TextUnmarshaler); ok { + return pv + } + } + return v + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + return indirect(reflect.Indirect(v)) +} + +func isUnifiable(rv reflect.Value) bool { + if rv.CanSet() { + return true + } + if _, ok := rv.Interface().(TextUnmarshaler); ok { + return true + } + return false +} + +func badtype(expected string, data interface{}) error { + return e("cannot load TOML value of type %T into a Go %s", data, expected) +} diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go new file mode 100644 index 0000000..b9914a6 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode_meta.go @@ -0,0 +1,121 @@ +package toml + +import "strings" + +// MetaData allows access to meta information about TOML data that may not +// be inferrable via reflection. In particular, whether a key has been defined +// and the TOML type of a key. +type MetaData struct { + mapping map[string]interface{} + types map[string]tomlType + keys []Key + decoded map[string]bool + context Key // Used only during decoding. +} + +// IsDefined returns true if the key given exists in the TOML data. The key +// should be specified hierarchially. e.g., +// +// // access the TOML key 'a.b.c' +// IsDefined("a", "b", "c") +// +// IsDefined will return false if an empty key given. Keys are case sensitive. 
+func (md *MetaData) IsDefined(key ...string) bool { + if len(key) == 0 { + return false + } + + var hash map[string]interface{} + var ok bool + var hashOrVal interface{} = md.mapping + for _, k := range key { + if hash, ok = hashOrVal.(map[string]interface{}); !ok { + return false + } + if hashOrVal, ok = hash[k]; !ok { + return false + } + } + return true +} + +// Type returns a string representation of the type of the key specified. +// +// Type will return the empty string if given an empty key or a key that +// does not exist. Keys are case sensitive. +func (md *MetaData) Type(key ...string) string { + fullkey := strings.Join(key, ".") + if typ, ok := md.types[fullkey]; ok { + return typ.typeString() + } + return "" +} + +// Key is the type of any TOML key, including key groups. Use (MetaData).Keys +// to get values of this type. +type Key []string + +func (k Key) String() string { + return strings.Join(k, ".") +} + +func (k Key) maybeQuotedAll() string { + var ss []string + for i := range k { + ss = append(ss, k.maybeQuoted(i)) + } + return strings.Join(ss, ".") +} + +func (k Key) maybeQuoted(i int) string { + quote := false + for _, c := range k[i] { + if !isBareKeyChar(c) { + quote = true + break + } + } + if quote { + return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" + } + return k[i] +} + +func (k Key) add(piece string) Key { + newKey := make(Key, len(k)+1) + copy(newKey, k) + newKey[len(k)] = piece + return newKey +} + +// Keys returns a slice of every key in the TOML data, including key groups. +// Each key is itself a slice, where the first element is the top of the +// hierarchy and the last is the most specific. +// +// The list will have the same order as the keys appeared in the TOML data. +// +// All keys returned are non-empty. +func (md *MetaData) Keys() []Key { + return md.keys +} + +// Undecoded returns all keys that have not been decoded in the order in which +// they appear in the original TOML document. +// +// This includes keys that haven't been decoded because of a Primitive value. +// Once the Primitive value is decoded, the keys will be considered decoded. +// +// Also note that decoding into an empty interface will result in no decoding, +// and so no keys will be considered decoded. +// +// In this sense, the Undecoded keys correspond to keys in the TOML document +// that do not have a concrete type in your representation. +func (md *MetaData) Undecoded() []Key { + undecoded := make([]Key, 0, len(md.keys)) + for _, key := range md.keys { + if !md.decoded[key.String()] { + undecoded = append(undecoded, key) + } + } + return undecoded +} diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go new file mode 100644 index 0000000..b371f39 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/doc.go @@ -0,0 +1,27 @@ +/* +Package toml provides facilities for decoding and encoding TOML configuration +files via reflection. There is also support for delaying decoding with +the Primitive type, and querying the set of keys in a TOML document with the +MetaData type. + +The specification implemented: https://github.com/toml-lang/toml + +The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify +whether a file is a valid TOML document. It can also be used to print the +type of each key in a TOML document. + +Testing + +There are two important types of tests used for this package. The first is +contained inside '*_test.go' files and uses the standard Go unit testing +framework. 
These tests are primarily devoted to holistically testing the +decoder and encoder. + +The second type of testing is used to verify the implementation's adherence +to the TOML specification. These tests have been factored into their own +project: https://github.com/BurntSushi/toml-test + +The reason the tests are in a separate project is so that they can be used by +any implementation of TOML. Namely, it is language agnostic. +*/ +package toml diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go new file mode 100644 index 0000000..d905c21 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -0,0 +1,568 @@ +package toml + +import ( + "bufio" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +type tomlEncodeError struct{ error } + +var ( + errArrayMixedElementTypes = errors.New( + "toml: cannot encode array with mixed element types") + errArrayNilElement = errors.New( + "toml: cannot encode array with nil element") + errNonString = errors.New( + "toml: cannot encode a map with non-string key type") + errAnonNonStruct = errors.New( + "toml: cannot encode an anonymous field that is not a struct") + errArrayNoTable = errors.New( + "toml: TOML array element cannot contain a table") + errNoKey = errors.New( + "toml: top-level values must be Go maps or structs") + errAnything = errors.New("") // used in testing +) + +var quotedReplacer = strings.NewReplacer( + "\t", "\\t", + "\n", "\\n", + "\r", "\\r", + "\"", "\\\"", + "\\", "\\\\", +) + +// Encoder controls the encoding of Go values to a TOML document to some +// io.Writer. +// +// The indentation level can be controlled with the Indent field. +type Encoder struct { + // A single indentation level. By default it is two spaces. + Indent string + + // hasWritten is whether we have written any output to w yet. + hasWritten bool + w *bufio.Writer +} + +// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer +// given. By default, a single indentation level is 2 spaces. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: bufio.NewWriter(w), + Indent: " ", + } +} + +// Encode writes a TOML representation of the Go value to the underlying +// io.Writer. If the value given cannot be encoded to a valid TOML document, +// then an error is returned. +// +// The mapping between Go values and TOML values should be precisely the same +// as for the Decode* functions. Similarly, the TextMarshaler interface is +// supported by encoding the resulting bytes as strings. (If you want to write +// arbitrary binary data then you will need to use something like base64 since +// TOML does not have any binary types.) +// +// When encoding TOML hashes (i.e., Go maps or structs), keys without any +// sub-hashes are encoded first. +// +// If a Go map is encoded, then its keys are sorted alphabetically for +// deterministic output. More control over this behavior may be provided if +// there is demand for it. +// +// Encoding Go values without a corresponding TOML representation---like map +// types with non-string keys---will cause an error to be returned. Similarly +// for mixed arrays/slices, arrays/slices with nil elements, embedded +// non-struct types and nested slices containing maps or structs. +// (e.g., [][]map[string]string is not allowed but []map[string]string is OK +// and so is []map[string][]string.) 
+func (enc *Encoder) Encode(v interface{}) error { + rv := eindirect(reflect.ValueOf(v)) + if err := enc.safeEncode(Key([]string{}), rv); err != nil { + return err + } + return enc.w.Flush() +} + +func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { + defer func() { + if r := recover(); r != nil { + if terr, ok := r.(tomlEncodeError); ok { + err = terr.error + return + } + panic(r) + } + }() + enc.encode(key, rv) + return nil +} + +func (enc *Encoder) encode(key Key, rv reflect.Value) { + // Special case. Time needs to be in ISO8601 format. + // Special case. If we can marshal the type to text, then we used that. + // Basically, this prevents the encoder for handling these types as + // generic structs (or whatever the underlying type of a TextMarshaler is). + switch rv.Interface().(type) { + case time.Time, TextMarshaler: + enc.keyEqElement(key, rv) + return + } + + k := rv.Kind() + switch k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64, + reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: + enc.keyEqElement(key, rv) + case reflect.Array, reflect.Slice: + if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { + enc.eArrayOfTables(key, rv) + } else { + enc.keyEqElement(key, rv) + } + case reflect.Interface: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Map: + if rv.IsNil() { + return + } + enc.eTable(key, rv) + case reflect.Ptr: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Struct: + enc.eTable(key, rv) + default: + panic(e("unsupported type for key '%s': %s", key, k)) + } +} + +// eElement encodes any value that can be an array element (primitives and +// arrays). +func (enc *Encoder) eElement(rv reflect.Value) { + switch v := rv.Interface().(type) { + case time.Time: + // Special case time.Time as a primitive. Has to come before + // TextMarshaler below because time.Time implements + // encoding.TextMarshaler, but we need to always use UTC. + enc.wf(v.UTC().Format("2006-01-02T15:04:05Z")) + return + case TextMarshaler: + // Special case. Use text marshaler if it's available for this value. + if s, err := v.MarshalText(); err != nil { + encPanic(err) + } else { + enc.writeQuoted(string(s)) + } + return + } + switch rv.Kind() { + case reflect.Bool: + enc.wf(strconv.FormatBool(rv.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64: + enc.wf(strconv.FormatInt(rv.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, + reflect.Uint32, reflect.Uint64: + enc.wf(strconv.FormatUint(rv.Uint(), 10)) + case reflect.Float32: + enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32))) + case reflect.Float64: + enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64))) + case reflect.Array, reflect.Slice: + enc.eArrayOrSliceElement(rv) + case reflect.Interface: + enc.eElement(rv.Elem()) + case reflect.String: + enc.writeQuoted(rv.String()) + default: + panic(e("unexpected primitive type: %s", rv.Kind())) + } +} + +// By the TOML spec, all floats must have a decimal with at least one +// number on either side. 
+func floatAddDecimal(fstr string) string { + if !strings.Contains(fstr, ".") { + return fstr + ".0" + } + return fstr +} + +func (enc *Encoder) writeQuoted(s string) { + enc.wf("\"%s\"", quotedReplacer.Replace(s)) +} + +func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { + length := rv.Len() + enc.wf("[") + for i := 0; i < length; i++ { + elem := rv.Index(i) + enc.eElement(elem) + if i != length-1 { + enc.wf(", ") + } + } + enc.wf("]") +} + +func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + for i := 0; i < rv.Len(); i++ { + trv := rv.Index(i) + if isNil(trv) { + continue + } + panicIfInvalidKey(key) + enc.newline() + enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) + enc.newline() + enc.eMapOrStruct(key, trv) + } +} + +func (enc *Encoder) eTable(key Key, rv reflect.Value) { + panicIfInvalidKey(key) + if len(key) == 1 { + // Output an extra newline between top-level tables. + // (The newline isn't written if nothing else has been written though.) + enc.newline() + } + if len(key) > 0 { + enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) + enc.newline() + } + enc.eMapOrStruct(key, rv) +} + +func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) { + switch rv := eindirect(rv); rv.Kind() { + case reflect.Map: + enc.eMap(key, rv) + case reflect.Struct: + enc.eStruct(key, rv) + default: + panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) + } +} + +func (enc *Encoder) eMap(key Key, rv reflect.Value) { + rt := rv.Type() + if rt.Key().Kind() != reflect.String { + encPanic(errNonString) + } + + // Sort keys so that we have deterministic output. And write keys directly + // underneath this key first, before writing sub-structs or sub-maps. + var mapKeysDirect, mapKeysSub []string + for _, mapKey := range rv.MapKeys() { + k := mapKey.String() + if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) { + mapKeysSub = append(mapKeysSub, k) + } else { + mapKeysDirect = append(mapKeysDirect, k) + } + } + + var writeMapKeys = func(mapKeys []string) { + sort.Strings(mapKeys) + for _, mapKey := range mapKeys { + mrv := rv.MapIndex(reflect.ValueOf(mapKey)) + if isNil(mrv) { + // Don't write anything for nil fields. + continue + } + enc.encode(key.add(mapKey), mrv) + } + } + writeMapKeys(mapKeysDirect) + writeMapKeys(mapKeysSub) +} + +func (enc *Encoder) eStruct(key Key, rv reflect.Value) { + // Write keys for fields directly under this key first, because if we write + // a field that creates a new table, then all keys under it will be in that + // table (not the one we're writing here). + rt := rv.Type() + var fieldsDirect, fieldsSub [][]int + var addFields func(rt reflect.Type, rv reflect.Value, start []int) + addFields = func(rt reflect.Type, rv reflect.Value, start []int) { + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + // skip unexported fields + if f.PkgPath != "" && !f.Anonymous { + continue + } + frv := rv.Field(i) + if f.Anonymous { + t := f.Type + switch t.Kind() { + case reflect.Struct: + // Treat anonymous struct fields with + // tag names as though they are not + // anonymous, like encoding/json does. + if getOptions(f.Tag).name == "" { + addFields(t, frv, f.Index) + continue + } + case reflect.Ptr: + if t.Elem().Kind() == reflect.Struct && + getOptions(f.Tag).name == "" { + if !frv.IsNil() { + addFields(t.Elem(), frv.Elem(), f.Index) + } + continue + } + // Fall through to the normal field encoding logic below + // for non-struct anonymous fields. 
+ } + } + + if typeIsHash(tomlTypeOfGo(frv)) { + fieldsSub = append(fieldsSub, append(start, f.Index...)) + } else { + fieldsDirect = append(fieldsDirect, append(start, f.Index...)) + } + } + } + addFields(rt, rv, nil) + + var writeFields = func(fields [][]int) { + for _, fieldIndex := range fields { + sft := rt.FieldByIndex(fieldIndex) + sf := rv.FieldByIndex(fieldIndex) + if isNil(sf) { + // Don't write anything for nil fields. + continue + } + + opts := getOptions(sft.Tag) + if opts.skip { + continue + } + keyName := sft.Name + if opts.name != "" { + keyName = opts.name + } + if opts.omitempty && isEmpty(sf) { + continue + } + if opts.omitzero && isZero(sf) { + continue + } + + enc.encode(key.add(keyName), sf) + } + } + writeFields(fieldsDirect) + writeFields(fieldsSub) +} + +// tomlTypeName returns the TOML type name of the Go value's type. It is +// used to determine whether the types of array elements are mixed (which is +// forbidden). If the Go value is nil, then it is illegal for it to be an array +// element, and valueIsNil is returned as true. + +// Returns the TOML type of a Go value. The type may be `nil`, which means +// no concrete TOML type could be found. +func tomlTypeOfGo(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() { + return nil + } + switch rv.Kind() { + case reflect.Bool: + return tomlBool + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64: + return tomlInteger + case reflect.Float32, reflect.Float64: + return tomlFloat + case reflect.Array, reflect.Slice: + if typeEqual(tomlHash, tomlArrayType(rv)) { + return tomlArrayHash + } + return tomlArray + case reflect.Ptr, reflect.Interface: + return tomlTypeOfGo(rv.Elem()) + case reflect.String: + return tomlString + case reflect.Map: + return tomlHash + case reflect.Struct: + switch rv.Interface().(type) { + case time.Time: + return tomlDatetime + case TextMarshaler: + return tomlString + default: + return tomlHash + } + default: + panic("unexpected reflect.Kind: " + rv.Kind().String()) + } +} + +// tomlArrayType returns the element type of a TOML array. The type returned +// may be nil if it cannot be determined (e.g., a nil slice or a zero length +// slize). This function may also panic if it finds a type that cannot be +// expressed in TOML (such as nil elements, heterogeneous arrays or directly +// nested arrays of tables). +func tomlArrayType(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { + return nil + } + firstType := tomlTypeOfGo(rv.Index(0)) + if firstType == nil { + encPanic(errArrayNilElement) + } + + rvlen := rv.Len() + for i := 1; i < rvlen; i++ { + elem := rv.Index(i) + switch elemType := tomlTypeOfGo(elem); { + case elemType == nil: + encPanic(errArrayNilElement) + case !typeEqual(firstType, elemType): + encPanic(errArrayMixedElementTypes) + } + } + // If we have a nested array, then we must make sure that the nested + // array contains ONLY primitives. + // This checks arbitrarily nested arrays. 
+ if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) { + nest := tomlArrayType(eindirect(rv.Index(0))) + if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) { + encPanic(errArrayNoTable) + } + } + return firstType +} + +type tagOptions struct { + skip bool // "-" + name string + omitempty bool + omitzero bool +} + +func getOptions(tag reflect.StructTag) tagOptions { + t := tag.Get("toml") + if t == "-" { + return tagOptions{skip: true} + } + var opts tagOptions + parts := strings.Split(t, ",") + opts.name = parts[0] + for _, s := range parts[1:] { + switch s { + case "omitempty": + opts.omitempty = true + case "omitzero": + opts.omitzero = true + } + } + return opts +} + +func isZero(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return rv.Uint() == 0 + case reflect.Float32, reflect.Float64: + return rv.Float() == 0.0 + } + return false +} + +func isEmpty(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return rv.Len() == 0 + case reflect.Bool: + return !rv.Bool() + } + return false +} + +func (enc *Encoder) newline() { + if enc.hasWritten { + enc.wf("\n") + } +} + +func (enc *Encoder) keyEqElement(key Key, val reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + panicIfInvalidKey(key) + enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) + enc.eElement(val) + enc.newline() +} + +func (enc *Encoder) wf(format string, v ...interface{}) { + if _, err := fmt.Fprintf(enc.w, format, v...); err != nil { + encPanic(err) + } + enc.hasWritten = true +} + +func (enc *Encoder) indentStr(key Key) string { + return strings.Repeat(enc.Indent, len(key)-1) +} + +func encPanic(err error) { + panic(tomlEncodeError{err}) +} + +func eindirect(v reflect.Value) reflect.Value { + switch v.Kind() { + case reflect.Ptr, reflect.Interface: + return eindirect(v.Elem()) + default: + return v + } +} + +func isNil(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return rv.IsNil() + default: + return false + } +} + +func panicIfInvalidKey(key Key) { + for _, k := range key { + if len(k) == 0 { + encPanic(e("Key '%s' is not a valid table name. Key names "+ + "cannot be empty.", key.maybeQuotedAll())) + } + } +} + +func isValidKeyName(s string) bool { + return len(s) != 0 +} diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go new file mode 100644 index 0000000..d36e1dd --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encoding_types.go @@ -0,0 +1,19 @@ +// +build go1.2 + +package toml + +// In order to support Go 1.1, we define our own TextMarshaler and +// TextUnmarshaler types. For Go 1.2+, we just alias them with the +// standard library interfaces. + +import ( + "encoding" +) + +// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here +// so that Go 1.1 can be supported. +type TextMarshaler encoding.TextMarshaler + +// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined +// here so that Go 1.1 can be supported. 
+type TextUnmarshaler encoding.TextUnmarshaler diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go new file mode 100644 index 0000000..e8d503d --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go @@ -0,0 +1,18 @@ +// +build !go1.2 + +package toml + +// These interfaces were introduced in Go 1.2, so we add them manually when +// compiling for Go 1.1. + +// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here +// so that Go 1.1 can be supported. +type TextMarshaler interface { + MarshalText() (text []byte, err error) +} + +// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined +// here so that Go 1.1 can be supported. +type TextUnmarshaler interface { + UnmarshalText(text []byte) error +} diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go new file mode 100644 index 0000000..e0a742a --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -0,0 +1,953 @@ +package toml + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +type itemType int + +const ( + itemError itemType = iota + itemNIL // used in the parser to indicate no type + itemEOF + itemText + itemString + itemRawString + itemMultilineString + itemRawMultilineString + itemBool + itemInteger + itemFloat + itemDatetime + itemArray // the start of an array + itemArrayEnd + itemTableStart + itemTableEnd + itemArrayTableStart + itemArrayTableEnd + itemKeyStart + itemCommentStart + itemInlineTableStart + itemInlineTableEnd +) + +const ( + eof = 0 + comma = ',' + tableStart = '[' + tableEnd = ']' + arrayTableStart = '[' + arrayTableEnd = ']' + tableSep = '.' + keySep = '=' + arrayStart = '[' + arrayEnd = ']' + commentStart = '#' + stringStart = '"' + stringEnd = '"' + rawStringStart = '\'' + rawStringEnd = '\'' + inlineTableStart = '{' + inlineTableEnd = '}' +) + +type stateFn func(lx *lexer) stateFn + +type lexer struct { + input string + start int + pos int + line int + state stateFn + items chan item + + // Allow for backing up up to three runes. + // This is necessary because TOML contains 3-rune tokens (""" and '''). + prevWidths [3]int + nprev int // how many of prevWidths are in use + // If we emit an eof, we can still back up, but it is not OK to call + // next again. + atEOF bool + + // A stack of state functions used to maintain context. + // The idea is to reuse parts of the state machine in various places. + // For example, values can appear at the top level or within arbitrarily + // nested arrays. The last state on the stack is used after a value has + // been lexed. Similarly for comments. 
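+ // (Concretely: lexArrayValue pushes lexArrayValueEnd before handing control to lexValue, and the terminal value state pops it once the element has been consumed.)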
+ stack []stateFn +} + +type item struct { + typ itemType + val string + line int +} + +func (lx *lexer) nextItem() item { + for { + select { + case item := <-lx.items: + return item + default: + lx.state = lx.state(lx) + } + } +} + +func lex(input string) *lexer { + lx := &lexer{ + input: input, + state: lexTop, + line: 1, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + } + return lx +} + +func (lx *lexer) push(state stateFn) { + lx.stack = append(lx.stack, state) +} + +func (lx *lexer) pop() stateFn { + if len(lx.stack) == 0 { + return lx.errorf("BUG in lexer: no states to pop") + } + last := lx.stack[len(lx.stack)-1] + lx.stack = lx.stack[0 : len(lx.stack)-1] + return last +} + +func (lx *lexer) current() string { + return lx.input[lx.start:lx.pos] +} + +func (lx *lexer) emit(typ itemType) { + lx.items <- item{typ, lx.current(), lx.line} + lx.start = lx.pos +} + +func (lx *lexer) emitTrim(typ itemType) { + lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line} + lx.start = lx.pos +} + +func (lx *lexer) next() (r rune) { + if lx.atEOF { + panic("next called after EOF") + } + if lx.pos >= len(lx.input) { + lx.atEOF = true + return eof + } + + if lx.input[lx.pos] == '\n' { + lx.line++ + } + lx.prevWidths[2] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[0] + if lx.nprev < 3 { + lx.nprev++ + } + r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) + lx.prevWidths[0] = w + lx.pos += w + return r +} + +// ignore skips over the pending input before this point. +func (lx *lexer) ignore() { + lx.start = lx.pos +} + +// backup steps back one rune. Can be called up to three times between calls to next. +func (lx *lexer) backup() { + if lx.atEOF { + lx.atEOF = false + return + } + if lx.nprev < 1 { + panic("backed up too far") + } + w := lx.prevWidths[0] + lx.prevWidths[0] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[2] + lx.nprev-- + lx.pos -= w + if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { + lx.line-- + } +} + +// accept consumes the next rune if it's equal to `valid`. +func (lx *lexer) accept(valid rune) bool { + if lx.next() == valid { + return true + } + lx.backup() + return false +} + +// peek returns but does not consume the next rune in the input. +func (lx *lexer) peek() rune { + r := lx.next() + lx.backup() + return r +} + +// skip ignores all input that matches the given predicate. +func (lx *lexer) skip(pred func(rune) bool) { + for { + r := lx.next() + if pred(r) { + continue + } + lx.backup() + lx.ignore() + return + } +} + +// errorf stops all lexing by emitting an error and returning `nil`. +// Note that any value that is a character is escaped if it's a special +// character (newlines, tabs, etc.). +func (lx *lexer) errorf(format string, values ...interface{}) stateFn { + lx.items <- item{ + itemError, + fmt.Sprintf(format, values...), + lx.line, + } + return nil +} + +// lexTop consumes elements at the top level of TOML data. +func lexTop(lx *lexer) stateFn { + r := lx.next() + if isWhitespace(r) || isNL(r) { + return lexSkip(lx, lexTop) + } + switch r { + case commentStart: + lx.push(lexTop) + return lexCommentStart + case tableStart: + return lexTableStart + case eof: + if lx.pos > lx.start { + return lx.errorf("unexpected EOF") + } + lx.emit(itemEOF) + return nil + } + + // At this point, the only valid item can be a key, so we back up + // and let the key lexer do the rest. + lx.backup() + lx.push(lexTopEnd) + return lexKeyStart +} + +// lexTopEnd is entered whenever a top-level item has been consumed. (A value +// or a table.)
It must see only whitespace, and will turn back to lexTop +// upon a newline. If it sees EOF, it will quit the lexer successfully. +func lexTopEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case r == commentStart: + // a comment will read to a newline for us. + lx.push(lexTop) + return lexCommentStart + case isWhitespace(r): + return lexTopEnd + case isNL(r): + lx.ignore() + return lexTop + case r == eof: + lx.emit(itemEOF) + return nil + } + return lx.errorf("expected a top-level item to end with a newline, "+ + "comment, or EOF, but got %q instead", r) +} + +// lexTableStart lexes the beginning of a table. Namely, it makes sure that +// it starts with a character other than '.' and ']'. +// It assumes that '[' has already been consumed. +// It also handles the case that this is an item in an array of tables. +// e.g., '[[name]]'. +func lexTableStart(lx *lexer) stateFn { + if lx.peek() == arrayTableStart { + lx.next() + lx.emit(itemArrayTableStart) + lx.push(lexArrayTableEnd) + } else { + lx.emit(itemTableStart) + lx.push(lexTableEnd) + } + return lexTableNameStart +} + +func lexTableEnd(lx *lexer) stateFn { + lx.emit(itemTableEnd) + return lexTopEnd +} + +func lexArrayTableEnd(lx *lexer) stateFn { + if r := lx.next(); r != arrayTableEnd { + return lx.errorf("expected end of table array name delimiter %q, "+ + "but got %q instead", arrayTableEnd, r) + } + lx.emit(itemArrayTableEnd) + return lexTopEnd +} + +func lexTableNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == tableEnd || r == eof: + return lx.errorf("unexpected end of table name " + + "(table names cannot be empty)") + case r == tableSep: + return lx.errorf("unexpected table separator " + + "(table names cannot be empty)") + case r == stringStart || r == rawStringStart: + lx.ignore() + lx.push(lexTableNameEnd) + return lexValue // reuse string lexing + default: + return lexBareTableName + } +} + +// lexBareTableName lexes the name of a table. It assumes that at least one +// valid character for the table has already been read. +func lexBareTableName(lx *lexer) stateFn { + r := lx.next() + if isBareKeyChar(r) { + return lexBareTableName + } + lx.backup() + lx.emit(itemText) + return lexTableNameEnd +} + +// lexTableNameEnd reads the end of a piece of a table name, optionally +// consuming whitespace. +func lexTableNameEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.next(); { + case isWhitespace(r): + return lexTableNameEnd + case r == tableSep: + lx.ignore() + return lexTableNameStart + case r == tableEnd: + return lx.pop() + default: + return lx.errorf("expected '.' or ']' to end table name, "+ + "but got %q instead", r) + } +} + +// lexKeyStart consumes a key name up until the first non-whitespace character. +// lexKeyStart will ignore whitespace. +func lexKeyStart(lx *lexer) stateFn { + r := lx.peek() + switch { + case r == keySep: + return lx.errorf("unexpected key separator %q", keySep) + case isWhitespace(r) || isNL(r): + lx.next() + return lexSkip(lx, lexKeyStart) + case r == stringStart || r == rawStringStart: + lx.ignore() + lx.emit(itemKeyStart) + lx.push(lexKeyEnd) + return lexValue // reuse string lexing + default: + lx.ignore() + lx.emit(itemKeyStart) + return lexBareKey + } +} + +// lexBareKey consumes the text of a bare key. Assumes that the first character +// (which is not whitespace) has not yet been consumed.
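+// Bare keys may only contain the characters accepted by isBareKeyChar (ASCII letters, digits, '_' and '-'), e.g. "server-1" or "my_key".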
+func lexBareKey(lx *lexer) stateFn { + switch r := lx.next(); { + case isBareKeyChar(r): + return lexBareKey + case isWhitespace(r): + lx.backup() + lx.emit(itemText) + return lexKeyEnd + case r == keySep: + lx.backup() + lx.emit(itemText) + return lexKeyEnd + default: + return lx.errorf("bare keys cannot contain %q", r) + } +} + +// lexKeyEnd consumes the end of a key and trims whitespace (up to the key +// separator). +func lexKeyEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case r == keySep: + return lexSkip(lx, lexValue) + case isWhitespace(r): + return lexSkip(lx, lexKeyEnd) + default: + return lx.errorf("expected key separator %q, but got %q instead", + keySep, r) + } +} + +// lexValue starts the consumption of a value anywhere a value is expected. +// lexValue will ignore whitespace. +// After a value is lexed, the last state on the stack is popped and returned. +func lexValue(lx *lexer) stateFn { + // We allow whitespace to precede a value, but NOT newlines. + // In array syntax, the array states are responsible for ignoring newlines. + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexValue) + case isDigit(r): + lx.backup() // avoid an extra state and use the same as above + return lexNumberOrDateStart + } + switch r { + case arrayStart: + lx.ignore() + lx.emit(itemArray) + return lexArrayValue + case inlineTableStart: + lx.ignore() + lx.emit(itemInlineTableStart) + return lexInlineTableValue + case stringStart: + if lx.accept(stringStart) { + if lx.accept(stringStart) { + lx.ignore() // Ignore """ + return lexMultilineString + } + lx.backup() + } + lx.ignore() // ignore the '"' + return lexString + case rawStringStart: + if lx.accept(rawStringStart) { + if lx.accept(rawStringStart) { + lx.ignore() // Ignore ''' + return lexMultilineRawString + } + lx.backup() + } + lx.ignore() // ignore the "'" + return lexRawString + case '+', '-': + return lexNumberStart + case '.': // special error case, be kind to users + return lx.errorf("floats must start with a digit, not '.'") + } + if unicode.IsLetter(r) { + // Be permissive here; lexBool will give a nice error if the + // user wrote something like + // x = foo + // (i.e. not 'true' or 'false' but is something else word-like.) + lx.backup() + return lexBool + } + return lx.errorf("expected value but found %q instead", r) +} + +// lexArrayValue consumes one value in an array. It assumes that '[' or ',' +// have already been consumed. All whitespace and newlines are ignored. +func lexArrayValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValue) + case r == commentStart: + lx.push(lexArrayValue) + return lexCommentStart + case r == comma: + return lx.errorf("unexpected comma") + case r == arrayEnd: + // NOTE(caleb): The spec isn't clear about whether you can have + // a trailing comma or not, so we'll allow it. + return lexArrayEnd + } + + lx.backup() + lx.push(lexArrayValueEnd) + return lexValue +} + +// lexArrayValueEnd consumes everything between the end of an array value and +// the next value (or the end of the array): it ignores whitespace and newlines +// and expects either a ',' or a ']'.
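+// For example, in "[1, 2]" this state runs after each element: after "1" it consumes the comma, and after "2" the closing ']'.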
+func lexArrayValueEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValueEnd) + case r == commentStart: + lx.push(lexArrayValueEnd) + return lexCommentStart + case r == comma: + lx.ignore() + return lexArrayValue // move on to the next value + case r == arrayEnd: + return lexArrayEnd + } + return lx.errorf( + "expected a comma or array terminator %q, but got %q instead", + arrayEnd, r, + ) +} + +// lexArrayEnd finishes the lexing of an array. +// It assumes that a ']' has just been consumed. +func lexArrayEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemArrayEnd) + return lx.pop() +} + +// lexInlineTableValue consumes one key/value pair in an inline table. +// It assumes that '{' or ',' have already been consumed. Whitespace is ignored. +func lexInlineTableValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValue) + case isNL(r): + return lx.errorf("newlines not allowed within inline tables") + case r == commentStart: + lx.push(lexInlineTableValue) + return lexCommentStart + case r == comma: + return lx.errorf("unexpected comma") + case r == inlineTableEnd: + return lexInlineTableEnd + } + lx.backup() + lx.push(lexInlineTableValueEnd) + return lexKeyStart +} + +// lexInlineTableValueEnd consumes everything between the end of an inline table +// key/value pair and the next pair (or the end of the table): +// it ignores whitespace and expects either a ',' or a '}'. +func lexInlineTableValueEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValueEnd) + case isNL(r): + return lx.errorf("newlines not allowed within inline tables") + case r == commentStart: + lx.push(lexInlineTableValueEnd) + return lexCommentStart + case r == comma: + lx.ignore() + return lexInlineTableValue + case r == inlineTableEnd: + return lexInlineTableEnd + } + return lx.errorf("expected a comma or an inline table terminator %q, "+ + "but got %q instead", inlineTableEnd, r) +} + +// lexInlineTableEnd finishes the lexing of an inline table. +// It assumes that a '}' has just been consumed. +func lexInlineTableEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemInlineTableEnd) + return lx.pop() +} + +// lexString consumes the inner contents of a string. It assumes that the +// beginning '"' has already been consumed and ignored. +func lexString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf("unexpected EOF") + case isNL(r): + return lx.errorf("strings cannot contain newlines") + case r == '\\': + lx.push(lexString) + return lexStringEscape + case r == stringEnd: + lx.backup() + lx.emit(itemString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexString +} + +// lexMultilineString consumes the inner contents of a string. It assumes that +// the beginning '"""' has already been consumed and ignored. +func lexMultilineString(lx *lexer) stateFn { + switch lx.next() { + case eof: + return lx.errorf("unexpected EOF") + case '\\': + return lexMultilineStringEscape + case stringEnd: + if lx.accept(stringEnd) { + if lx.accept(stringEnd) { + lx.backup() + lx.backup() + lx.backup() + lx.emit(itemMultilineString) + lx.next() + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + } + return lexMultilineString +} + +// lexRawString consumes a raw string. Nothing can be escaped in such a string. +// It assumes that the beginning "'" has already been consumed and ignored. 
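+// For example, in x = 'C:\Users' the backslash is kept verbatim rather than starting an escape sequence.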
+func lexRawString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf("unexpected EOF") + case isNL(r): + return lx.errorf("strings cannot contain newlines") + case r == rawStringEnd: + lx.backup() + lx.emit(itemRawString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexRawString +} + +// lexMultilineRawString consumes a raw string. Nothing can be escaped in such +// a string. It assumes that the beginning "'''" has already been consumed and +// ignored. +func lexMultilineRawString(lx *lexer) stateFn { + switch lx.next() { + case eof: + return lx.errorf("unexpected EOF") + case rawStringEnd: + if lx.accept(rawStringEnd) { + if lx.accept(rawStringEnd) { + lx.backup() + lx.backup() + lx.backup() + lx.emit(itemRawMultilineString) + lx.next() + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + } + return lexMultilineRawString +} + +// lexMultilineStringEscape consumes an escaped character. It assumes that the +// preceding '\\' has already been consumed. +func lexMultilineStringEscape(lx *lexer) stateFn { + // Handle the special case first: + if isNL(lx.next()) { + return lexMultilineString + } + lx.backup() + lx.push(lexMultilineString) + return lexStringEscape(lx) +} + +func lexStringEscape(lx *lexer) stateFn { + r := lx.next() + switch r { + case 'b': + fallthrough + case 't': + fallthrough + case 'n': + fallthrough + case 'f': + fallthrough + case 'r': + fallthrough + case '"': + fallthrough + case '\\': + return lx.pop() + case 'u': + return lexShortUnicodeEscape + case 'U': + return lexLongUnicodeEscape + } + return lx.errorf("invalid escape character %q; only the following "+ + "escape characters are allowed: "+ + `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r) +} + +func lexShortUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 4; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf(`expected four hexadecimal digits after '\u', `+ + "but got %q instead", lx.current()) + } + } + return lx.pop() +} + +func lexLongUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 8; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf(`expected eight hexadecimal digits after '\U', `+ + "but got %q instead", lx.current()) + } + } + return lx.pop() +} + +// lexNumberOrDateStart consumes either an integer, a float, or datetime. +func lexNumberOrDateStart(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '_': + return lexNumber + case 'e', 'E': + return lexFloat + case '.': + return lx.errorf("floats must start with a digit, not '.'") + } + return lx.errorf("expected a digit but got %q", r) +} + +// lexNumberOrDate consumes either an integer, float or datetime. +func lexNumberOrDate(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '-': + return lexDatetime + case '_': + return lexNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDatetime consumes a Datetime, to a first approximation. +// The parser validates that it matches one of the accepted formats. +func lexDatetime(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDatetime + } + switch r { + case '-', 'T', ':', '.', 'Z', '+': + return lexDatetime + } + + lx.backup() + lx.emit(itemDatetime) + return lx.pop() +} + +// lexNumberStart consumes either an integer or a float. 
It assumes that a sign +// has already been read, but that *no* digits have been consumed. +// lexNumberStart will move to the appropriate integer or float states. +func lexNumberStart(lx *lexer) stateFn { + // We MUST see a digit. Even floats have to start with a digit. + r := lx.next() + if !isDigit(r) { + if r == '.' { + return lx.errorf("floats must start with a digit, not '.'") + } + return lx.errorf("expected a digit but got %q", r) + } + return lexNumber +} + +// lexNumber consumes an integer or a float after seeing the first digit. +func lexNumber(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumber + } + switch r { + case '_': + return lexNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexFloat consumes the elements of a float. It allows any sequence of +// float-like characters, so floats emitted by the lexer are only a first +// approximation and must be validated by the parser. +func lexFloat(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexFloat + } + switch r { + case '_', '.', '-', '+', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemFloat) + return lx.pop() +} + +// lexBool consumes a bool string: 'true' or 'false'. +func lexBool(lx *lexer) stateFn { + var rs []rune + for { + r := lx.next() + if !unicode.IsLetter(r) { + lx.backup() + break + } + rs = append(rs, r) + } + s := string(rs) + switch s { + case "true", "false": + lx.emit(itemBool) + return lx.pop() + } + return lx.errorf("expected value but found %q instead", s) +} + +// lexCommentStart begins the lexing of a comment. It will emit +// itemCommentStart and consume no characters, passing control to lexComment. +func lexCommentStart(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemCommentStart) + return lexComment +} + +// lexComment lexes an entire comment. It assumes that '#' has been consumed. +// It will consume *up to* the first newline character, and pass control +// back to the last state on the stack. +func lexComment(lx *lexer) stateFn { + r := lx.peek() + if isNL(r) || r == eof { + lx.emit(itemText) + return lx.pop() + } + lx.next() + return lexComment +} + +// lexSkip ignores all slurped input and moves on to the next state. +func lexSkip(lx *lexer, nextState stateFn) stateFn { + return func(lx *lexer) stateFn { + lx.ignore() + return nextState + } +} + +// isWhitespace returns true if `r` is a whitespace character according +// to the spec.
+func isWhitespace(r rune) bool { + return r == '\t' || r == ' ' +} + +func isNL(r rune) bool { + return r == '\n' || r == '\r' +} + +func isDigit(r rune) bool { + return r >= '0' && r <= '9' +} + +func isHexadecimal(r rune) bool { + return (r >= '0' && r <= '9') || + (r >= 'a' && r <= 'f') || + (r >= 'A' && r <= 'F') +} + +func isBareKeyChar(r rune) bool { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || + r == '-' +} + +func (itype itemType) String() string { + switch itype { + case itemError: + return "Error" + case itemNIL: + return "NIL" + case itemEOF: + return "EOF" + case itemText: + return "Text" + case itemString, itemRawString, itemMultilineString, itemRawMultilineString: + return "String" + case itemBool: + return "Bool" + case itemInteger: + return "Integer" + case itemFloat: + return "Float" + case itemDatetime: + return "DateTime" + case itemTableStart: + return "TableStart" + case itemTableEnd: + return "TableEnd" + case itemKeyStart: + return "KeyStart" + case itemArray: + return "Array" + case itemArrayEnd: + return "ArrayEnd" + case itemCommentStart: + return "CommentStart" + } + panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) +} + +func (item item) String() string { + return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) +} diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go new file mode 100644 index 0000000..50869ef --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/parse.go @@ -0,0 +1,592 @@ +package toml + +import ( + "fmt" + "strconv" + "strings" + "time" + "unicode" + "unicode/utf8" +) + +type parser struct { + mapping map[string]interface{} + types map[string]tomlType + lx *lexer + + // A list of keys in the order that they appear in the TOML data. + ordered []Key + + // the full key for the current hash in scope + context Key + + // the base key name for everything except hashes + currentKey string + + // rough approximation of line number + approxLine int + + // A map of 'key.group.names' to whether they were created implicitly. 
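+ // For example, defining only [a.b] creates "a" implicitly; an explicit [a] may still appear later without triggering a duplicate-key error.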
+ implicits map[string]bool +} + +type parseError string + +func (pe parseError) Error() string { + return string(pe) +} + +func parse(data string) (p *parser, err error) { + defer func() { + if r := recover(); r != nil { + var ok bool + if err, ok = r.(parseError); ok { + return + } + panic(r) + } + }() + + p = &parser{ + mapping: make(map[string]interface{}), + types: make(map[string]tomlType), + lx: lex(data), + ordered: make([]Key, 0), + implicits: make(map[string]bool), + } + for { + item := p.next() + if item.typ == itemEOF { + break + } + p.topLevel(item) + } + + return p, nil +} + +func (p *parser) panicf(format string, v ...interface{}) { + msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s", + p.approxLine, p.current(), fmt.Sprintf(format, v...)) + panic(parseError(msg)) +} + +func (p *parser) next() item { + it := p.lx.nextItem() + if it.typ == itemError { + p.panicf("%s", it.val) + } + return it +} + +func (p *parser) bug(format string, v ...interface{}) { + panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) +} + +func (p *parser) expect(typ itemType) item { + it := p.next() + p.assertEqual(typ, it.typ) + return it +} + +func (p *parser) assertEqual(expected, got itemType) { + if expected != got { + p.bug("Expected '%s' but got '%s'.", expected, got) + } +} + +func (p *parser) topLevel(item item) { + switch item.typ { + case itemCommentStart: + p.approxLine = item.line + p.expect(itemText) + case itemTableStart: + kg := p.next() + p.approxLine = kg.line + + var key Key + for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() { + key = append(key, p.keyString(kg)) + } + p.assertEqual(itemTableEnd, kg.typ) + + p.establishContext(key, false) + p.setType("", tomlHash) + p.ordered = append(p.ordered, key) + case itemArrayTableStart: + kg := p.next() + p.approxLine = kg.line + + var key Key + for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() { + key = append(key, p.keyString(kg)) + } + p.assertEqual(itemArrayTableEnd, kg.typ) + + p.establishContext(key, true) + p.setType("", tomlArrayHash) + p.ordered = append(p.ordered, key) + case itemKeyStart: + kname := p.next() + p.approxLine = kname.line + p.currentKey = p.keyString(kname) + + val, typ := p.value(p.next()) + p.setValue(p.currentKey, val) + p.setType(p.currentKey, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + p.currentKey = "" + default: + p.bug("Unexpected type at top level: %s", item.typ) + } +} + +// Gets a string for a key (or part of a key in a table name). +func (p *parser) keyString(it item) string { + switch it.typ { + case itemText: + return it.val + case itemString, itemMultilineString, + itemRawString, itemRawMultilineString: + s, _ := p.value(it) + return s.(string) + default: + p.bug("Unexpected key type: %s", it.typ) + panic("unreachable") + } +} + +// value translates an expected value from the lexer into a Go value wrapped +// as an empty interface. 
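+// For example, the lexed item ("1_000", itemInteger) becomes (int64(1000), tomlInteger).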
+func (p *parser) value(it item) (interface{}, tomlType) { + switch it.typ { + case itemString: + return p.replaceEscapes(it.val), p.typeOfPrimitive(it) + case itemMultilineString: + trimmed := stripFirstNewline(stripEscapedWhitespace(it.val)) + return p.replaceEscapes(trimmed), p.typeOfPrimitive(it) + case itemRawString: + return it.val, p.typeOfPrimitive(it) + case itemRawMultilineString: + return stripFirstNewline(it.val), p.typeOfPrimitive(it) + case itemBool: + switch it.val { + case "true": + return true, p.typeOfPrimitive(it) + case "false": + return false, p.typeOfPrimitive(it) + } + p.bug("Expected boolean value, but got '%s'.", it.val) + case itemInteger: + if !numUnderscoresOK(it.val) { + p.panicf("Invalid integer %q: underscores must be surrounded by digits", + it.val) + } + val := strings.Replace(it.val, "_", "", -1) + num, err := strconv.ParseInt(val, 10, 64) + if err != nil { + // Distinguish integer values. Normally, it'd be a bug if the lexer + // provides an invalid integer, but it's possible that the number is + // out of range of valid values (which the lexer cannot determine). + // So mark the former as a bug but the latter as a legitimate user + // error. + if e, ok := err.(*strconv.NumError); ok && + e.Err == strconv.ErrRange { + + p.panicf("Integer '%s' is out of the range of 64-bit "+ + "signed integers.", it.val) + } else { + p.bug("Expected integer value, but got '%s'.", it.val) + } + } + return num, p.typeOfPrimitive(it) + case itemFloat: + parts := strings.FieldsFunc(it.val, func(r rune) bool { + switch r { + case '.', 'e', 'E': + return true + } + return false + }) + for _, part := range parts { + if !numUnderscoresOK(part) { + p.panicf("Invalid float %q: underscores must be "+ + "surrounded by digits", it.val) + } + } + if !numPeriodsOK(it.val) { + // As a special case, numbers like '123.' or '1.e2', + // which are valid as far as Go/strconv are concerned, + // must be rejected because TOML says that a fractional + // part consists of '.' followed by 1+ digits. + p.panicf("Invalid float %q: '.' 
must be followed "+ "by one or more digits", it.val) + } + val := strings.Replace(it.val, "_", "", -1) + num, err := strconv.ParseFloat(val, 64) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && + e.Err == strconv.ErrRange { + + p.panicf("Float '%s' is out of the range of 64-bit "+ + "IEEE-754 floating-point numbers.", it.val) + } else { + p.panicf("Invalid float value: %q", it.val) + } + } + return num, p.typeOfPrimitive(it) + case itemDatetime: + var t time.Time + var ok bool + var err error + for _, format := range []string{ + "2006-01-02T15:04:05Z07:00", + "2006-01-02T15:04:05", + "2006-01-02", + } { + t, err = time.ParseInLocation(format, it.val, time.Local) + if err == nil { + ok = true + break + } + } + if !ok { + p.panicf("Invalid TOML Datetime: %q.", it.val) + } + return t, p.typeOfPrimitive(it) + case itemArray: + array := make([]interface{}, 0) + types := make([]tomlType, 0) + + for it = p.next(); it.typ != itemArrayEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + val, typ := p.value(it) + array = append(array, val) + types = append(types, typ) + } + return array, p.typeOfArray(types) + case itemInlineTableStart: + var ( + hash = make(map[string]interface{}) + outerContext = p.context + outerKey = p.currentKey + ) + + p.context = append(p.context, p.currentKey) + p.currentKey = "" + for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { + // Comments are not key/value pairs, so check for them before + // insisting on a key start. + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + if it.typ != itemKeyStart { + p.bug("Expected key start but instead found %q, around line %d", + it.val, p.approxLine) + } + + // retrieve key + k := p.next() + p.approxLine = k.line + kname := p.keyString(k) + + // retrieve value + p.currentKey = kname + val, typ := p.value(p.next()) + // make sure we keep metadata up to date + p.setType(kname, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + hash[kname] = val + } + p.context = outerContext + p.currentKey = outerKey + return hash, tomlHash + } + p.bug("Unexpected value type: %s", it.typ) + panic("unreachable") +} + +// numUnderscoresOK checks whether each underscore in s is surrounded by +// characters that are not underscores. +func numUnderscoresOK(s string) bool { + accept := false + for _, r := range s { + if r == '_' { + if !accept { + return false + } + accept = false + continue + } + accept = true + } + return accept +} + +// numPeriodsOK checks whether every period in s is followed by a digit. +func numPeriodsOK(s string) bool { + period := false + for _, r := range s { + if period && !isDigit(r) { + return false + } + period = r == '.' + } + return !period +} + +// establishContext sets the current context of the parser, +// where the context is either a hash or an array of hashes. Which one is +// set depends on the value of the `array` parameter. +// +// Establishing the context also makes sure that the key isn't a duplicate, and +// will create implicit hashes automatically. +func (p *parser) establishContext(key Key, array bool) { + var ok bool + + // Always start at the top level and drill down for our context. + hashContext := p.mapping + keyContext := make(Key, 0) + + // We only need implicit hashes for key[0:-1] + for _, k := range key[0 : len(key)-1] { + _, ok = hashContext[k] + keyContext = append(keyContext, k) + + // No key? Make an implicit hash and move on.
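+ // (For the key group [a.b.c], both "a" and "a.b" may be created this way.)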
+ if !ok { + p.addImplicit(keyContext) + hashContext[k] = make(map[string]interface{}) + } + + // If the hash context is actually an array of tables, then set + // the hash context to the last element in that array. + // + // Otherwise, it better be a table, since this MUST be a key group (by + // virtue of it not being the last element in a key). + switch t := hashContext[k].(type) { + case []map[string]interface{}: + hashContext = t[len(t)-1] + case map[string]interface{}: + hashContext = t + default: + p.panicf("Key '%s' was already created as a hash.", keyContext) + } + } + + p.context = keyContext + if array { + // If this is the first element for this array, then allocate a new + // list of tables for it. + k := key[len(key)-1] + if _, ok := hashContext[k]; !ok { + hashContext[k] = make([]map[string]interface{}, 0, 5) + } + + // Add a new table. But make sure the key hasn't already been used + // for something else. + if hash, ok := hashContext[k].([]map[string]interface{}); ok { + hashContext[k] = append(hash, make(map[string]interface{})) + } else { + p.panicf("Key '%s' was already created and cannot be used as "+ + "an array.", keyContext) + } + } else { + p.setValue(key[len(key)-1], make(map[string]interface{})) + } + p.context = append(p.context, key[len(key)-1]) +} + +// setValue sets the given key to the given value in the current context. +// It makes sure that the key hasn't already been defined, and accounts for +// implicit key groups. +func (p *parser) setValue(key string, value interface{}) { + var tmpHash interface{} + var ok bool + + hash := p.mapping + keyContext := make(Key, 0) + for _, k := range p.context { + keyContext = append(keyContext, k) + if tmpHash, ok = hash[k]; !ok { + p.bug("Context for key '%s' has not been established.", keyContext) + } + switch t := tmpHash.(type) { + case []map[string]interface{}: + // The context is an array of tables. Pick the most recent table + // defined as the current hash. + hash = t[len(t)-1] + case map[string]interface{}: + hash = t + default: + p.bug("Expected hash to have type 'map[string]interface{}', but "+ + "it has '%T' instead.", tmpHash) + } + } + keyContext = append(keyContext, key) + + if _, ok := hash[key]; ok { + // Typically, if the given key has already been set, then we have + // to raise an error since duplicate keys are disallowed. However, + // it's possible that a key was previously defined implicitly. In this + // case, it is allowed to be redefined concretely. (See the + // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.) + // + // But we have to make sure to stop marking it as implicit. (So that + // another redefinition provokes an error.) + // + // Note that since it has already been defined (as a hash), we don't + // want to overwrite it. So our business is done. + if p.isImplicit(keyContext) { + p.removeImplicit(keyContext) + return + } + + // Otherwise, we have a concrete key trying to override a previous + // key, which is *always* wrong. + p.panicf("Key '%s' has already been defined.", keyContext) + } + hash[key] = value +} + +// setType sets the type of a particular value at a given key. +// It should be called immediately AFTER setValue. +// +// Note that if `key` is empty, then the type given will be applied to the +// current context (which is either a table or an array of tables).
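+// For example, topLevel calls p.setType("", tomlHash) immediately after establishing the context for a [table] header.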
+func (p *parser) setType(key string, typ tomlType) { + keyContext := make(Key, 0, len(p.context)+1) + for _, k := range p.context { + keyContext = append(keyContext, k) + } + if len(key) > 0 { // allow type setting for hashes + keyContext = append(keyContext, key) + } + p.types[keyContext.String()] = typ +} + +// addImplicit sets the given Key as having been created implicitly. +func (p *parser) addImplicit(key Key) { + p.implicits[key.String()] = true +} + +// removeImplicit stops tagging the given key as having been implicitly +// created. +func (p *parser) removeImplicit(key Key) { + p.implicits[key.String()] = false +} + +// isImplicit returns true if the key group pointed to by the key was created +// implicitly. +func (p *parser) isImplicit(key Key) bool { + return p.implicits[key.String()] +} + +// current returns the full key name of the current context. +func (p *parser) current() string { + if len(p.currentKey) == 0 { + return p.context.String() + } + if len(p.context) == 0 { + return p.currentKey + } + return fmt.Sprintf("%s.%s", p.context, p.currentKey) +} + +func stripFirstNewline(s string) string { + if len(s) == 0 || s[0] != '\n' { + return s + } + return s[1:] +} + +func stripEscapedWhitespace(s string) string { + esc := strings.Split(s, "\\\n") + if len(esc) > 1 { + for i := 1; i < len(esc); i++ { + esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace) + } + } + return strings.Join(esc, "") +} + +func (p *parser) replaceEscapes(str string) string { + var replaced []rune + s := []byte(str) + r := 0 + for r < len(s) { + if s[r] != '\\' { + c, size := utf8.DecodeRune(s[r:]) + r += size + replaced = append(replaced, c) + continue + } + r += 1 + if r >= len(s) { + p.bug("Escape sequence at end of string.") + return "" + } + switch s[r] { + default: + p.bug("Expected valid escape code after \\, but got %q.", s[r]) + return "" + case 'b': + replaced = append(replaced, rune(0x0008)) + r += 1 + case 't': + replaced = append(replaced, rune(0x0009)) + r += 1 + case 'n': + replaced = append(replaced, rune(0x000A)) + r += 1 + case 'f': + replaced = append(replaced, rune(0x000C)) + r += 1 + case 'r': + replaced = append(replaced, rune(0x000D)) + r += 1 + case '"': + replaced = append(replaced, rune(0x0022)) + r += 1 + case '\\': + replaced = append(replaced, rune(0x005C)) + r += 1 + case 'u': + // At this point, we know we have a Unicode escape of the form + // `uXXXX` at [r, r+5). (Because the lexer guarantees this + // for us.) + escaped := p.asciiEscapeToUnicode(s[r+1 : r+5]) + replaced = append(replaced, escaped) + r += 5 + case 'U': + // At this point, we know we have a Unicode escape of the form + // `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this + // for us.)
+ escaped := p.asciiEscapeToUnicode(s[r+1 : r+9]) + replaced = append(replaced, escaped) + r += 9 + } + } + return string(replaced) +} + +func (p *parser) asciiEscapeToUnicode(bs []byte) rune { + s := string(bs) + hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) + if err != nil { + p.bug("Could not parse '%s' as a hexadecimal number, but the "+ + "lexer claims it's OK: %s", s, err) + } + if !utf8.ValidRune(rune(hex)) { + p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) + } + return rune(hex) +} + +func isStringType(ty itemType) bool { + return ty == itemString || ty == itemMultilineString || + ty == itemRawString || ty == itemRawMultilineString +} diff --git a/vendor/github.com/BurntSushi/toml/session.vim b/vendor/github.com/BurntSushi/toml/session.vim new file mode 100644 index 0000000..562164b --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/session.vim @@ -0,0 +1 @@ +au BufWritePost *.go silent!make tags > /dev/null 2>&1 diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go new file mode 100644 index 0000000..c73f8af --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_check.go @@ -0,0 +1,91 @@ +package toml + +// tomlType represents any Go type that corresponds to a TOML type. +// While the first draft of the TOML spec has a simplistic type system that +// probably doesn't need this level of sophistication, we seem to be militating +// toward adding real composite types. +type tomlType interface { + typeString() string +} + +// typeEqual accepts any two types and returns true if they are equal. +func typeEqual(t1, t2 tomlType) bool { + if t1 == nil || t2 == nil { + return false + } + return t1.typeString() == t2.typeString() +} + +func typeIsHash(t tomlType) bool { + return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) +} + +type tomlBaseType string + +func (btype tomlBaseType) typeString() string { + return string(btype) +} + +func (btype tomlBaseType) String() string { + return btype.typeString() +} + +var ( + tomlInteger tomlBaseType = "Integer" + tomlFloat tomlBaseType = "Float" + tomlDatetime tomlBaseType = "Datetime" + tomlString tomlBaseType = "String" + tomlBool tomlBaseType = "Bool" + tomlArray tomlBaseType = "Array" + tomlHash tomlBaseType = "Hash" + tomlArrayHash tomlBaseType = "ArrayHash" +) + +// typeOfPrimitive returns a tomlType of any primitive value in TOML. +// Primitive values are: Integer, Float, Datetime, String and Bool. +// +// Passing a lexer item other than the following will cause a BUG message +// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. +func (p *parser) typeOfPrimitive(lexItem item) tomlType { + switch lexItem.typ { + case itemInteger: + return tomlInteger + case itemFloat: + return tomlFloat + case itemDatetime: + return tomlDatetime + case itemString: + return tomlString + case itemMultilineString: + return tomlString + case itemRawString: + return tomlString + case itemRawMultilineString: + return tomlString + case itemBool: + return tomlBool + } + p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) + panic("unreachable") +} + +// typeOfArray returns a tomlType for an array given a list of types of its +// values. +// +// In the current spec, if an array is homogeneous, then its type is always +// "Array". If the array is not homogeneous, an error is generated. +func (p *parser) typeOfArray(types []tomlType) tomlType { + // Empty arrays are cool. 
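+ // (e.g. "x = []" typechecks as a plain Array.)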
+ if len(types) == 0 { + return tomlArray + } + + theType := types[0] + for _, t := range types[1:] { + if !typeEqual(theType, t) { + p.panicf("Array contains values of type '%s' and '%s', but "+ + "arrays must be homogeneous.", theType, t) + } + } + return tomlArray +} diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go new file mode 100644 index 0000000..608997c --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_fields.go @@ -0,0 +1,242 @@ +package toml + +// Struct field handling is adapted from code in encoding/json: +// +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the Go distribution. + +import ( + "reflect" + "sort" + "sync" +) + +// A field represents a single field found in a struct. +type field struct { + name string // the name of the field (`toml` tag included) + tag bool // whether field has a `toml` tag + index []int // represents the depth of an anonymous field + typ reflect.Type // the type of the field +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from toml tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that TOML should recognize for the given +// type. The algorithm is breadth-first search over the set of structs to +// include - the top struct and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + opts := getOptions(sf.Tag) + if opts.skip { + continue + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. 
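+ // A field is recorded here when it is explicitly tagged, is not anonymous, or is an anonymous field of a non-struct type.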
+ if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := opts.name != "" + name := opts.name + if name == "" { + name = sf.Name + } + fields = append(fields, field{name, tagged, index, ft}) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + f := field{name: ft.Name(), index: index, typ: ft} + next = append(next, f) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with TOML tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// TOML tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. 
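+ // Concurrent callers may each compute the same field list; the result is deterministic, so whichever write lands last is harmless.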
+ f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} diff --git a/vendor/github.com/Sirupsen/logrus/.gitignore b/vendor/github.com/Sirupsen/logrus/.gitignore new file mode 100644 index 0000000..6b7d7d1 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/.gitignore @@ -0,0 +1,2 @@ +logrus +vendor diff --git a/vendor/github.com/Sirupsen/logrus/.golangci.yml b/vendor/github.com/Sirupsen/logrus/.golangci.yml new file mode 100644 index 0000000..65dc285 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/.golangci.yml @@ -0,0 +1,40 @@ +run: + # do not run on test files yet + tests: false + +# all available settings of specific linters +linters-settings: + errcheck: + # report about not checking errors in type assertions: `a := b.(MyStruct)`; + # default is false: such cases aren't reported by default. + check-type-assertions: false + + # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`; + # default is false: such cases aren't reported by default. + check-blank: false + + lll: + line-length: 100 + tab-width: 4 + + prealloc: + simple: false + range-loops: false + for-loops: false + + whitespace: + multi-if: false # Enforces newlines (or comments) after every multi-line if statement + multi-func: false # Enforces newlines (or comments) after every multi-line function signature + +linters: + enable: + - megacheck + - govet + disable: + - maligned + - prealloc + disable-all: false + presets: + - bugs + - unused + fast: false diff --git a/vendor/github.com/Sirupsen/logrus/.travis.yml b/vendor/github.com/Sirupsen/logrus/.travis.yml new file mode 100644 index 0000000..5e20aa4 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/.travis.yml @@ -0,0 +1,17 @@ +language: go +go_import_path: github.com/sirupsen/logrus +git: + depth: 1 +env: + - GO111MODULE=on +go: [1.13.x, 1.14.x] +os: [linux, osx] +install: + - ./travis/install.sh +script: + - ./travis/cross_build.sh + - ./travis/lint.sh + - export GOMAXPROCS=4 + - export GORACE=halt_on_error=1 + - go test -race -v ./... + - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then go test -race -v -tags appengine ./...
; fi diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md new file mode 100644 index 0000000..584026d --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md @@ -0,0 +1,223 @@ +# 1.6.0 +Fixes: + * end of line cleanup + * revert the entry concurrency bug fix which leads to deadlock under some circumstances + * update dependency on go-windows-terminal-sequences to fix a crash with go 1.14 + +Features: + * add an option to the `TextFormatter` to completely disable fields quoting + +# 1.5.0 +Code quality: + * add golangci linter run on travis + +Fixes: + * add mutex for hooks concurrent access on `Entry` data + * caller function field for go1.14 + * fix build issue for gopherjs target + +Feature: + * add a hooks/writer sub-package whose goal is to split output to different streams depending on the trace level + * add a `DisableHTMLEscape` option in the `JSONFormatter` + * add `ForceQuote` and `PadLevelText` options in the `TextFormatter` + +# 1.4.2 + * Fixes build break for plan9, nacl, solaris +# 1.4.1 +This new release introduces: + * Enhance TextFormatter to not print caller information when it is empty (#944) + * Remove dependency on golang.org/x/crypto (#932, #943) + +Fixes: + * Fix Entry.WithContext method to return a copy of the initial entry (#941) + +# 1.4.0 +This new release introduces: + * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848). + * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911) + * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919). + +Fixes: + * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893). + * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903) + * Fix infinite recursion on unknown `Level.String()` (#907) + * Fix race condition in `getCaller` (#916). + + +# 1.3.0 +This new release introduces: + * Log, Logf, Logln functions for Logger and Entry that take a Level + +Fixes: + * Building prometheus node_exporter on AIX (#840) + * Race condition in TextFormatter (#468) + * Travis CI import path (#868) + * Remove coloured output on Windows (#862) + * Pointer to func as field in JSONFormatter (#870) + * Properly marshal Levels (#873) + +# 1.2.0 +This new release introduces: + * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued + * A new trace level named `Trace` whose level is below `Debug` + * A configurable exit function to be called upon a Fatal trace + * The `Level` object now implements `encoding.TextUnmarshaler` interface + +# 1.1.1 +This is a bug fix release.
+ * fix the build break on Solaris
+ * don't drop a whole trace in JSONFormatter when a field param is a function pointer which cannot be serialized
+
+# 1.1.0
+This new release introduces:
+ * several fixes:
+   * a fix for a race condition on entry formatting
+   * proper cleanup of previously used entries before putting them back in the pool
+   * the extra new line at the end of message in text formatter has been removed
+ * a new global public API to check if a level is activated: IsLevelEnabled
+ * the following methods have been added to the Logger object
+   * IsLevelEnabled
+   * SetFormatter
+   * SetOutput
+   * ReplaceHooks
+ * introduction of go module
+ * an indent configuration for the JSON formatter
+ * output colour support for Windows
+ * the field sort function is now configurable for the text formatter
+ * the CLICOLOR and CLICOLOR\_FORCE environment variable support in the text formatter
+
+# 1.0.6
+
+This new release introduces:
+ * a new API, WithTime, which makes it easy to force the time of the log entry;
+   this is mostly useful for logger wrappers
+ * a fix reverting the immutability of the entry given as parameter to the hooks
+ * a new configuration field of the JSON formatter in order to put all the fields
+   in a nested dictionary
+ * a new SetOutput method in the Logger
+ * a new configuration of the text formatter to configure the name of the default keys
+ * a new configuration of the text formatter to disable the level truncation
+
+# 1.0.5
+
+* Fix hooks race (#707)
+* Fix panic deadlock (#695)
+
+# 1.0.4
+
+* Fix race when adding hooks (#612)
+* Fix terminal check in AppEngine (#635)
+
+# 1.0.3
+
+* Replace example files with testable examples
+
+# 1.0.2
+
+* bug: quote non-string values in text formatter (#583)
+* Make (*Logger) SetLevel a public method
+
+# 1.0.1
+
+* bug: fix escaping in text formatter (#575)
+
+# 1.0.0
+
+* Officially changed name to lower-case
+* bug: colors on Windows 10 (#541)
+* bug: fix race in accessing level (#512)
+
+# 0.11.5
+
+* feature: add writer and writerlevel to entry (#372)
+
+# 0.11.4
+
+* bug: fix undefined variable on solaris (#493)
+
+# 0.11.3
+
+* formatter: configure quoting of empty values (#484)
+* formatter: configure quoting character (default is `"`) (#484)
+* bug: fix not importing io correctly in non-linux environments (#481)
+
+# 0.11.2
+
+* bug: fix windows terminal detection (#476)
+
+# 0.11.1
+
+* bug: fix tty detection with custom out (#471)
+
+# 0.11.0
+
+* performance: Use bufferpool to allocate (#370)
+* terminal: terminal detection for app-engine (#343)
+* feature: exit handler (#375)
+
+# 0.10.0
+
+* feature: Add a test hook (#180)
+* feature: `ParseLevel` is now case-insensitive (#326)
+* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
+* performance: avoid re-allocations on `WithFields` (#335)
+
+# 0.9.0
+
+* logrus/text_formatter: don't emit empty msg
+* logrus/hooks/airbrake: move out of main repository
+* logrus/hooks/sentry: move out of main repository
+* logrus/hooks/papertrail: move out of main repository
+* logrus/hooks/bugsnag: move out of main repository
+* logrus/core: run tests with `-race`
+* logrus/core: detect TTY based on `stderr`
+* logrus/core: support `WithError` on logger
+* logrus/core: Solaris support
+
+# 0.8.7
+
+* logrus/core: fix possible race (#216)
+* logrus/doc: small typo fixes and doc improvements
+
+
+# 0.8.6
+
+* hooks/raven: allow passing an initialized client
+
+# 0.8.5
+
+* logrus/core: revert #208
+
+# 0.8.4
+
+* formatter/text: fix data race (#218)
+
+#
0.8.3 + +* logrus/core: fix entry log level (#208) +* logrus/core: improve performance of text formatter by 40% +* logrus/core: expose `LevelHooks` type +* logrus/core: add support for DragonflyBSD and NetBSD +* formatter/text: print structs more verbosely + +# 0.8.2 + +* logrus: fix more Fatal family functions + +# 0.8.1 + +* logrus: fix not exiting on `Fatalf` and `Fatalln` + +# 0.8.0 + +* logrus: defaults to stderr instead of stdout +* hooks/sentry: add special field for `*http.Request` +* formatter/text: ignore Windows for colors + +# 0.7.3 + +* formatter/\*: allow configuration of timestamp layout + +# 0.7.2 + +* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/Sirupsen/logrus/LICENSE new file mode 100644 index 0000000..f090cb4 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md new file mode 100644 index 0000000..5796706 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/README.md @@ -0,0 +1,513 @@ +# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) + +Logrus is a structured logger for Go (golang), completely API compatible with +the standard library logger. + +**Logrus is in maintenance-mode.** We will not be introducing new features. It's +simply too hard to do in a way that won't break many people's projects, which is +the last thing you want from your Logging library (again...). + +This does not mean Logrus is dead. Logrus will continue to be maintained for +security, (backwards compatible) bug fixes, and performance (where we are +limited by the interface). + +I believe Logrus' biggest contribution is to have played a part in today's +widespread use of structured logging in Golang. There doesn't seem to be a +reason to do a major, breaking iteration into Logrus V2, since the fantastic Go +community has built those independently. Many fantastic alternatives have sprung +up. Logrus would look like those, had it been re-designed with what we know +about structured logging in Go today. Check out, for example, +[Zerolog][zerolog], [Zap][zap], and [Apex][apex]. 
+ +[zerolog]: https://github.com/rs/zerolog +[zap]: https://github.com/uber-go/zap +[apex]: https://github.com/apex/log + +**Seeing weird case-sensitive problems?** It's in the past been possible to +import Logrus as both upper- and lower-case. Due to the Go package environment, +this caused issues in the community and we needed a standard. Some environments +experienced problems with the upper-case variant, so the lower-case was decided. +Everything using `logrus` will need to use the lower-case: +`github.com/sirupsen/logrus`. Any package that isn't, should be changed. + +To fix Glide, see [these +comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). +For an in-depth explanation of the casing issue, see [this +comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276). + +Nicely color-coded in development (when a TTY is attached, otherwise just +plain text): + +![Colored](http://i.imgur.com/PY7qMwd.png) + +With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash +or Splunk: + +```json +{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the +ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} + +{"level":"warning","msg":"The group's number increased tremendously!", +"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"A giant walrus appears!", +"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", +"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} + +{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, +"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} +``` + +With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not +attached, the output is compatible with the +[logfmt](http://godoc.org/github.com/kr/logfmt) format: + +```text +time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 +time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 +time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true +time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 +time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 +time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true +``` +To ensure this behaviour even if a TTY is attached, set your formatter as follows: + +```go + log.SetFormatter(&log.TextFormatter{ + DisableColors: true, + FullTimestamp: true, + }) +``` + +#### Logging Method Name + +If you wish to add the calling method as a field, instruct the logger via: +```go +log.SetReportCaller(true) +``` +This adds the caller as 'method' like so: + +```json +{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by", +"time":"2014-03-10 19:57:38.562543129 -0400 EDT"} +``` + +```text +time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin +``` +Note that this does add measurable overhead - the cost will depend on the version of Go, but is +between 20 and 40% in recent tests with 1.6 and 1.7. 
You can validate this in your +environment via benchmarks: +``` +go test -bench=.*CallerTracing +``` + + +#### Case-sensitivity + +The organization's name was changed to lower-case--and this will not be changed +back. If you are getting import conflicts due to case sensitivity, please use +the lower-case import: `github.com/sirupsen/logrus`. + +#### Example + +The simplest way to use Logrus is simply the package-level exported logger: + +```go +package main + +import ( + log "github.com/sirupsen/logrus" +) + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + }).Info("A walrus appears") +} +``` + +Note that it's completely api-compatible with the stdlib logger, so you can +replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"` +and you'll now have the flexibility of Logrus. You can customize it all you +want: + +```go +package main + +import ( + "os" + log "github.com/sirupsen/logrus" +) + +func init() { + // Log as JSON instead of the default ASCII formatter. + log.SetFormatter(&log.JSONFormatter{}) + + // Output to stdout instead of the default stderr + // Can be any io.Writer, see below for File example + log.SetOutput(os.Stdout) + + // Only log the warning severity or above. + log.SetLevel(log.WarnLevel) +} + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(log.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(log.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") + + // A common pattern is to re-use fields between logging statements by re-using + // the logrus.Entry returned from WithFields() + contextLogger := log.WithFields(log.Fields{ + "common": "this is a common field", + "other": "I also should be logged always", + }) + + contextLogger.Info("I'll be logged with common and other field") + contextLogger.Info("Me too") +} +``` + +For more advanced usage such as logging to multiple locations from the same +application, you can also create an instance of the `logrus` Logger: + +```go +package main + +import ( + "os" + "github.com/sirupsen/logrus" +) + +// Create a new instance of the logger. You can have any number of instances. +var log = logrus.New() + +func main() { + // The API for setting attributes is a little different than the package level + // exported logger. See Godoc. + log.Out = os.Stdout + + // You could set this to any `io.Writer` such as a file + // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) + // if err == nil { + // log.Out = file + // } else { + // log.Info("Failed to log to file, using default stderr") + // } + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") +} +``` + +#### Fields + +Logrus encourages careful, structured logging through logging fields instead of +long, unparseable error messages. For example, instead of: `log.Fatalf("Failed +to send event %s to topic %s with key %d")`, you should log the much more +discoverable: + +```go +log.WithFields(log.Fields{ + "event": event, + "topic": topic, + "key": key, +}).Fatal("Failed to send event") +``` + +We've found this API forces you to think about logging in a way that produces +much more useful logging messages. We've been in countless situations where just +a single added field to a log statement that was already there would've saved us +hours. 
The `WithFields` call is optional.
+
+In general, with Logrus, using any of the `printf`-family functions should be
+seen as a hint that you should add a field; however, you can still use the
+`printf`-family functions with Logrus.
+
+#### Default Fields
+
+Often it's helpful to have fields _always_ attached to log statements in an
+application or parts of one. For example, you may want to always log the
+`request_id` and `user_ip` in the context of a request. Instead of writing
+`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
+every line, you can create a `logrus.Entry` to pass around instead:
+
+```go
+requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
+requestLogger.Info("something happened on that request") // will log request_id and user_ip
+requestLogger.Warn("something not great happened")
+```
+
+#### Hooks
+
+You can add hooks for logging levels. For example, to send errors to an exception
+tracking service on `Error`, `Fatal` and `Panic`, send info to StatsD, or log to
+multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
+
+```go
+import (
+  log "github.com/sirupsen/logrus"
+  "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
+  logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
+  "log/syslog"
+)
+
+func init() {
+
+  // Use the Airbrake hook to report errors that have Error severity or above to
+  // an exception tracker. You can create custom hooks, see the Hooks section.
+  log.AddHook(airbrake.NewHook(123, "xyz", "production"))
+
+  hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+  if err != nil {
+    log.Error("Unable to connect to local syslog daemon")
+  } else {
+    log.AddHook(hook)
+  }
+}
+```
+Note: The syslog hook also supports connecting to the local syslog (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
+
+A list of currently known service hooks can be found on this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks).
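+
+As a sketch of what a custom hook can look like, here is a minimal, illustrative
+implementation of the `Hook` interface (the `ErrorCounterHook` type below is a
+hypothetical example, not one of the built-in hooks):
+
+```go
+package main
+
+import (
+  "sync/atomic"
+
+  log "github.com/sirupsen/logrus"
+)
+
+// ErrorCounterHook counts every entry logged at Error level or above.
+type ErrorCounterHook struct {
+  count uint64
+}
+
+// Levels tells logrus which levels this hook fires on.
+func (h *ErrorCounterHook) Levels() []log.Level {
+  return []log.Level{log.ErrorLevel, log.FatalLevel, log.PanicLevel}
+}
+
+// Fire is invoked once per matching log entry.
+func (h *ErrorCounterHook) Fire(entry *log.Entry) error {
+  atomic.AddUint64(&h.count, 1)
+  return nil
+}
+
+func main() {
+  hook := &ErrorCounterHook{}
+  log.AddHook(hook)
+
+  log.Error("something failed") // increments the counter
+  log.Infof("errors so far: %d", atomic.LoadUint64(&hook.count))
+}
+```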
+
+#### Level logging
+
+Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Trace("Something very low level.")
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`; it will then only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields`, some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+   the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
+
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment, you
+could do:
+
+```go
+import (
+  log "github.com/sirupsen/logrus"
+)
+
+func init() {
+  // do something here to set environment depending on an environment variable
+  // or command-line flag
+  if Environment == "production" {
+    log.SetFormatter(&log.JSONFormatter{})
+  } else {
+    // The TextFormatter is default, you don't actually have to do this.
+    log.SetFormatter(&log.TextFormatter{})
+  }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+  without colors.
+  * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+    field to `true`. To force no colored output even if there is a TTY, set the
+    `DisableColors` field to `true`. For Windows, see
+    [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
+  * When colors are enabled, levels are truncated to 4 characters by default. To disable
+    truncation, set the `DisableLevelTruncation` field to `true`.
+  * When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text.
+  * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
+* `logrus.JSONFormatter`. Logs fields as JSON.
+  * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
+
+Third party logging formatters:
+
+* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
+* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply with Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html).
+* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo.
+* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure.
+* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). Gets the file name, line number and latest function name when printing logs; saves logs to files.
+* [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). Logrus message JSON formatter with a human-readable caption added.
+
+You can define your own formatter by implementing the `Formatter` interface,
+which requires a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+// Register it elsewhere with: log.SetFormatter(new(MyJSONFormatter))
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+  // Note this doesn't include Time, Level and Message which are available on
+  // the Entry. Consult `godoc` for information about those fields or read the
+  // source of the official loggers.
+  serialized, err := json.Marshal(entry.Data)
+  if err != nil {
+    return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+  }
+  return append(serialized, '\n'), nil
+}
+```
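+
+If you only want different key names rather than a whole custom formatter, the
+built-in `JSONFormatter` can be configured via its `FieldMap` option instead; a
+small sketch (the `@timestamp`-style names are just an assumed target schema):
+
+```go
+package main
+
+import (
+  log "github.com/sirupsen/logrus"
+)
+
+func main() {
+  log.SetFormatter(&log.JSONFormatter{
+    FieldMap: log.FieldMap{
+      log.FieldKeyTime:  "@timestamp",
+      log.FieldKeyLevel: "@level",
+      log.FieldKeyMsg:   "@message",
+    },
+  })
+
+  // => {"@level":"info","@message":"A walrus appears","@timestamp":"...","animal":"walrus"}
+  log.WithField("animal", "walrus").Info("A walrus appears")
+}
+```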
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the end of an
+`io.Pipe` and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+    // create a stdlib log.Logger that writes to
+    // logrus.Logger.
+    ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
+
+This means that we can override the standard library logger easily:
+
+```go
+logger := logrus.New()
+logger.Formatter = &logrus.JSONFormatter{}
+
+// Use logrus for standard log output
+// Note that `log` here references stdlib's log
+// Not logrus imported under the name `log`.
+log.SetOutput(logger.Writer())
+```
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+#### Tools
+
+| Tool | Description |
+| ---- | ----------- |
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus that manages loggers; you can set a logger's level, hook and formatter via a config file, and loggers are generated with different configs for different environments.|
+|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps spf13/Viper to load configuration with fangs, and simplifies Logrus configuration using some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
+
+#### Testing
+
+Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
+
+* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
+* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
+
+```go
+import (
+  "testing"
+
+  "github.com/sirupsen/logrus"
+  "github.com/sirupsen/logrus/hooks/test"
+  "github.com/stretchr/testify/assert"
+)
+
+func TestSomething(t *testing.T) {
+  logger, hook := test.NewNullLogger()
+  logger.Error("Hello error")
+
+  assert.Equal(t, 1, len(hook.Entries))
+  assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
+  assert.Equal(t, "Hello error", hook.LastEntry().Message)
+
+  hook.Reset()
+  assert.Nil(t, hook.LastEntry())
+}
+```
+
+#### Fatal handlers
+
+Logrus can register one or more functions that will be called when any `fatal`
+level message is logged. The registered handlers will be executed before
+logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
+to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
+
+```
+...
+handler := func() {
+  // gracefully shutdown something...
+}
+logrus.RegisterExitHandler(handler)
+...
+```
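+
+Putting it together, a minimal, self-contained sketch of an exit handler (the
+`closeResources` function is hypothetical):
+
+```go
+package main
+
+import (
+  log "github.com/sirupsen/logrus"
+)
+
+func main() {
+  // Stand-in for whatever cleanup your application actually needs.
+  closeResources := func() {
+    log.Info("closing database connections")
+  }
+  log.RegisterExitHandler(closeResources)
+
+  // The handler runs before the os.Exit(1) triggered by Fatal.
+  log.Fatal("unrecoverable error")
+}
+```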
+
+#### Thread safety
+
+By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs.
+If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
+
+Situations where locking is not needed include:
+
+* You have no hooks registered, or hook calling is already thread-safe.
+
+* Writing to logger.Out is already thread-safe, for example:
+
+  1) logger.Out is protected by locks.
+
+  2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
+
+  (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
diff --git a/vendor/github.com/Sirupsen/logrus/alt_exit.go b/vendor/github.com/Sirupsen/logrus/alt_exit.go
new file mode 100644
index 0000000..8fd189e
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/alt_exit.go
@@ -0,0 +1,76 @@
+package logrus
+
+// The following code was sourced and modified from the
+// https://github.com/tebeka/atexit package governed by the following license:
+//
+// Copyright (c) 2012 Miki Tebeka .
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import (
+	"fmt"
+	"os"
+)
+
+var handlers = []func(){}
+
+func runHandler(handler func()) {
+	defer func() {
+		if err := recover(); err != nil {
+			fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
+		}
+	}()
+
+	handler()
+}
+
+func runHandlers() {
+	for _, handler := range handlers {
+		runHandler(handler)
+	}
+}
+
+// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
+func Exit(code int) {
+	runHandlers()
+	os.Exit(code)
+}
+
+// RegisterExitHandler appends a Logrus Exit handler to the list of handlers;
+// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
+// any Fatal log entry is made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to shut down gracefully. An example use case could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func RegisterExitHandler(handler func()) {
+	handlers = append(handlers, handler)
+}
+
+// DeferExitHandler prepends a Logrus Exit handler to the list of handlers;
+// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
+// any Fatal log entry is made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to shut down gracefully. An example use case could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func DeferExitHandler(handler func()) {
+	handlers = append([]func(){handler}, handlers...)
+} diff --git a/vendor/github.com/Sirupsen/logrus/appveyor.yml b/vendor/github.com/Sirupsen/logrus/appveyor.yml new file mode 100644 index 0000000..df9d65c --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/appveyor.yml @@ -0,0 +1,14 @@ +version: "{build}" +platform: x64 +clone_folder: c:\gopath\src\github.com\sirupsen\logrus +environment: + GOPATH: c:\gopath +branches: + only: + - master +install: + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - go version +build_script: + - go get -t + - go test diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/Sirupsen/logrus/doc.go new file mode 100644 index 0000000..da67aba --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/doc.go @@ -0,0 +1,26 @@ +/* +Package logrus is a structured logger for Go, completely API compatible with the standard library logger. + + +The simplest way to use Logrus is simply the package-level exported logger: + + package main + + import ( + log "github.com/sirupsen/logrus" + ) + + func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "number": 1, + "size": 10, + }).Info("A walrus appears") + } + +Output: + time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 + +For a full guide visit https://github.com/sirupsen/logrus +*/ +package logrus diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go new file mode 100644 index 0000000..f6e062a --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/entry.go @@ -0,0 +1,426 @@ +package logrus + +import ( + "bytes" + "context" + "fmt" + "os" + "reflect" + "runtime" + "strings" + "sync" + "time" +) + +var ( + bufferPool *sync.Pool + + // qualified package name, cached at first use + logrusPackage string + + // Positions in the call stack when tracing to report the calling method + minimumCallerDepth int + + // Used for caller information initialisation + callerInitOnce sync.Once +) + +const ( + maximumCallerDepth int = 25 + knownLogrusFrames int = 4 +) + +func init() { + bufferPool = &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + } + + // start at the bottom of the stack before the package-name cache is primed + minimumCallerDepth = 1 +} + +// Defines the key when adding errors using WithError. +var ErrorKey = "error" + +// An entry is the final or intermediate Logrus logging entry. It contains all +// the fields passed with WithField{,s}. It's finally logged when Trace, Debug, +// Info, Warn, Error, Fatal or Panic is called on it. These objects can be +// reused and passed around as much as you wish to avoid field duplication. +type Entry struct { + Logger *Logger + + // Contains all the fields set by the user. + Data Fields + + // Time at which the log entry was created + Time time.Time + + // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic + // This field will be set on entry firing and the value will be equal to the one in Logger struct field. + Level Level + + // Calling method, with package name + Caller *runtime.Frame + + // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic + Message string + + // When formatter is called in entry.log(), a Buffer may be set to entry + Buffer *bytes.Buffer + + // Contains the context set by the user. Useful for hook processing etc. + Context context.Context + + // err may contain a field formatting error + err string +} + +func NewEntry(logger *Logger) *Entry { + return &Entry{ + Logger: logger, + // Default is three fields, plus one optional. 
Give a little extra room.
+		Data: make(Fields, 6),
+	}
+}
+
+// Returns the bytes representation of this entry from the formatter.
+func (entry *Entry) Bytes() ([]byte, error) {
+	return entry.Logger.Formatter.Format(entry)
+}
+
+// Returns the string representation from the reader and ultimately the
+// formatter.
+func (entry *Entry) String() (string, error) {
+	serialized, err := entry.Bytes()
+	if err != nil {
+		return "", err
+	}
+	str := string(serialized)
+	return str, nil
+}
+
+// Add an error as a single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+	return entry.WithField(ErrorKey, err)
+}
+
+// Add a context to the Entry.
+func (entry *Entry) WithContext(ctx context.Context) *Entry {
+	dataCopy := make(Fields, len(entry.Data))
+	for k, v := range entry.Data {
+		dataCopy[k] = v
+	}
+	return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx}
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+	return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+	data := make(Fields, len(entry.Data)+len(fields))
+	for k, v := range entry.Data {
+		data[k] = v
+	}
+	fieldErr := entry.err
+	for k, v := range fields {
+		isErrField := false
+		if t := reflect.TypeOf(v); t != nil {
+			switch t.Kind() {
+			case reflect.Func:
+				isErrField = true
+			case reflect.Ptr:
+				isErrField = t.Elem().Kind() == reflect.Func
+			}
+		}
+		if isErrField {
+			tmp := fmt.Sprintf("can not add field %q", k)
+			if fieldErr != "" {
+				fieldErr = entry.err + ", " + tmp
+			} else {
+				fieldErr = tmp
+			}
+		} else {
+			data[k] = v
+		}
+	}
+	return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context}
+}
+
+// Overrides the time of the Entry.
+func (entry *Entry) WithTime(t time.Time) *Entry {
+	dataCopy := make(Fields, len(entry.Data))
+	for k, v := range entry.Data {
+		dataCopy[k] = v
+	}
+	return &Entry{Logger: entry.Logger, Data: dataCopy, Time: t, err: entry.err, Context: entry.Context}
+}
+
+// getPackageName reduces a fully qualified function name to the package name.
+// There really ought to be a better way...
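+// For example, "github.com/sirupsen/logrus.(*Entry).log" reduces to "github.com/sirupsen/logrus".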
+func getPackageName(f string) string { + for { + lastPeriod := strings.LastIndex(f, ".") + lastSlash := strings.LastIndex(f, "/") + if lastPeriod > lastSlash { + f = f[:lastPeriod] + } else { + break + } + } + + return f +} + +// getCaller retrieves the name of the first non-logrus calling function +func getCaller() *runtime.Frame { + // cache this package's fully-qualified name + callerInitOnce.Do(func() { + pcs := make([]uintptr, maximumCallerDepth) + _ = runtime.Callers(0, pcs) + + // dynamic get the package name and the minimum caller depth + for i := 0; i < maximumCallerDepth; i++ { + funcName := runtime.FuncForPC(pcs[i]).Name() + if strings.Contains(funcName, "getCaller") { + logrusPackage = getPackageName(funcName) + break + } + } + + minimumCallerDepth = knownLogrusFrames + }) + + // Restrict the lookback frames to avoid runaway lookups + pcs := make([]uintptr, maximumCallerDepth) + depth := runtime.Callers(minimumCallerDepth, pcs) + frames := runtime.CallersFrames(pcs[:depth]) + + for f, again := frames.Next(); again; f, again = frames.Next() { + pkg := getPackageName(f.Function) + + // If the caller isn't part of this package, we're done + if pkg != logrusPackage { + return &f //nolint:scopelint + } + } + + // if we got here, we failed to find the caller's context + return nil +} + +func (entry Entry) HasCaller() (has bool) { + return entry.Logger != nil && + entry.Logger.ReportCaller && + entry.Caller != nil +} + +// This function is not declared with a pointer value because otherwise +// race conditions will occur when using multiple goroutines +func (entry Entry) log(level Level, msg string) { + var buffer *bytes.Buffer + + // Default to now, but allow users to override if they want. + // + // We don't have to worry about polluting future calls to Entry#log() + // with this assignment because this function is declared with a + // non-pointer receiver. + if entry.Time.IsZero() { + entry.Time = time.Now() + } + + entry.Level = level + entry.Message = msg + entry.Logger.mu.Lock() + if entry.Logger.ReportCaller { + entry.Caller = getCaller() + } + entry.Logger.mu.Unlock() + + entry.fireHooks() + + buffer = bufferPool.Get().(*bytes.Buffer) + buffer.Reset() + defer bufferPool.Put(buffer) + entry.Buffer = buffer + + entry.write() + + entry.Buffer = nil + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. + if level <= PanicLevel { + panic(&entry) + } +} + +func (entry *Entry) fireHooks() { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + err := entry.Logger.Hooks.Fire(entry.Level, entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + } +} + +func (entry *Entry) write() { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + serialized, err := entry.Logger.Formatter.Format(entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + return + } + if _, err = entry.Logger.Out.Write(serialized); err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } +} + +func (entry *Entry) Log(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.log(level, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Trace(args ...interface{}) { + entry.Log(TraceLevel, args...) +} + +func (entry *Entry) Debug(args ...interface{}) { + entry.Log(DebugLevel, args...) +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) 
+} + +func (entry *Entry) Info(args ...interface{}) { + entry.Log(InfoLevel, args...) +} + +func (entry *Entry) Warn(args ...interface{}) { + entry.Log(WarnLevel, args...) +} + +func (entry *Entry) Warning(args ...interface{}) { + entry.Warn(args...) +} + +func (entry *Entry) Error(args ...interface{}) { + entry.Log(ErrorLevel, args...) +} + +func (entry *Entry) Fatal(args ...interface{}) { + entry.Log(FatalLevel, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + entry.Log(PanicLevel, args...) + panic(fmt.Sprint(args...)) +} + +// Entry Printf family functions + +func (entry *Entry) Logf(level Level, format string, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Tracef(format string, args ...interface{}) { + entry.Logf(TraceLevel, format, args...) +} + +func (entry *Entry) Debugf(format string, args ...interface{}) { + entry.Logf(DebugLevel, format, args...) +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + entry.Logf(InfoLevel, format, args...) +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) +} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + entry.Logf(WarnLevel, format, args...) +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) +} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + entry.Logf(ErrorLevel, format, args...) +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + entry.Logf(FatalLevel, format, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + entry.Logf(PanicLevel, format, args...) +} + +// Entry Println family functions + +func (entry *Entry) Logln(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Traceln(args ...interface{}) { + entry.Logln(TraceLevel, args...) +} + +func (entry *Entry) Debugln(args ...interface{}) { + entry.Logln(DebugLevel, args...) +} + +func (entry *Entry) Infoln(args ...interface{}) { + entry.Logln(InfoLevel, args...) +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) +} + +func (entry *Entry) Warnln(args ...interface{}) { + entry.Logln(WarnLevel, args...) +} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + entry.Logln(ErrorLevel, args...) +} + +func (entry *Entry) Fatalln(args ...interface{}) { + entry.Logln(FatalLevel, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panicln(args ...interface{}) { + entry.Logln(PanicLevel, args...) +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of how +// fmt.Sprintln where spaces are always added between operands, regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. +func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...) 
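+	// fmt.Sprintln appends exactly one trailing newline; strip it off.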
+ return msg[:len(msg)-1] +} diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go new file mode 100644 index 0000000..42b04f6 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/exported.go @@ -0,0 +1,225 @@ +package logrus + +import ( + "context" + "io" + "time" +) + +var ( + // std is the name of the standard logger in stdlib `log` + std = New() +) + +func StandardLogger() *Logger { + return std +} + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + std.SetOutput(out) +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter Formatter) { + std.SetFormatter(formatter) +} + +// SetReportCaller sets whether the standard logger will include the calling +// method as a field. +func SetReportCaller(include bool) { + std.SetReportCaller(include) +} + +// SetLevel sets the standard logger level. +func SetLevel(level Level) { + std.SetLevel(level) +} + +// GetLevel returns the standard logger level. +func GetLevel() Level { + return std.GetLevel() +} + +// IsLevelEnabled checks if the log level of the standard logger is greater than the level param +func IsLevelEnabled(level Level) bool { + return std.IsLevelEnabled(level) +} + +// AddHook adds a hook to the standard logger hooks. +func AddHook(hook Hook) { + std.AddHook(hook) +} + +// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. +func WithError(err error) *Entry { + return std.WithField(ErrorKey, err) +} + +// WithContext creates an entry from the standard logger and adds a context to it. +func WithContext(ctx context.Context) *Entry { + return std.WithContext(ctx) +} + +// WithField creates an entry from the standard logger and adds a field to +// it. If you want multiple fields, use `WithFields`. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithField(key string, value interface{}) *Entry { + return std.WithField(key, value) +} + +// WithFields creates an entry from the standard logger and adds multiple +// fields to it. This is simply a helper for `WithField`, invoking it +// once for each field. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithFields(fields Fields) *Entry { + return std.WithFields(fields) +} + +// WithTime creates an entry from the standard logger and overrides the time of +// logs generated with it. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithTime(t time.Time) *Entry { + return std.WithTime(t) +} + +// Trace logs a message at level Trace on the standard logger. +func Trace(args ...interface{}) { + std.Trace(args...) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + std.Info(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. +func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. 
+func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) +} + +// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// Tracef logs a message at level Trace on the standard logger. +func Tracef(format string, args ...interface{}) { + std.Tracef(format, args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + std.Debugf(format, args...) +} + +// Printf logs a message at level Info on the standard logger. +func Printf(format string, args ...interface{}) { + std.Printf(format, args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + std.Infof(format, args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func Warnf(format string, args ...interface{}) { + std.Warnf(format, args...) +} + +// Warningf logs a message at level Warn on the standard logger. +func Warningf(format string, args ...interface{}) { + std.Warningf(format, args...) +} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + std.Errorf(format, args...) +} + +// Panicf logs a message at level Panic on the standard logger. +func Panicf(format string, args ...interface{}) { + std.Panicf(format, args...) +} + +// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1. +func Fatalf(format string, args ...interface{}) { + std.Fatalf(format, args...) +} + +// Traceln logs a message at level Trace on the standard logger. +func Traceln(args ...interface{}) { + std.Traceln(args...) +} + +// Debugln logs a message at level Debug on the standard logger. +func Debugln(args ...interface{}) { + std.Debugln(args...) +} + +// Println logs a message at level Info on the standard logger. +func Println(args ...interface{}) { + std.Println(args...) +} + +// Infoln logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + std.Infoln(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + std.Warnln(args...) +} + +// Warningln logs a message at level Warn on the standard logger. +func Warningln(args ...interface{}) { + std.Warningln(args...) +} + +// Errorln logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + std.Errorln(args...) +} + +// Panicln logs a message at level Panic on the standard logger. +func Panicln(args ...interface{}) { + std.Panicln(args...) +} + +// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1. +func Fatalln(args ...interface{}) { + std.Fatalln(args...) 
+}
diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go
new file mode 100644
index 0000000..4088837
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/formatter.go
@@ -0,0 +1,78 @@
+package logrus
+
+import "time"
+
+// Default key names for the default fields
+const (
+	defaultTimestampFormat = time.RFC3339
+	FieldKeyMsg            = "msg"
+	FieldKeyLevel          = "level"
+	FieldKeyTime           = "time"
+	FieldKeyLogrusError    = "logrus_error"
+	FieldKeyFunc           = "func"
+	FieldKeyFile           = "file"
+)
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]`. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return a byte slice which is then
+// logged to `logger.Out`.
+type Formatter interface {
+	Format(*Entry) ([]byte, error)
+}
+
+// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when
+// dumping it. If this code wasn't there, doing:
+//
+//  logrus.WithField("level", 1).Info("hello")
+//
+// would just silently drop the user-provided level. Instead, with this code
+// it'll be logged as:
+//
+//  {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) {
+	timeKey := fieldMap.resolve(FieldKeyTime)
+	if t, ok := data[timeKey]; ok {
+		data["fields."+timeKey] = t
+		delete(data, timeKey)
+	}
+
+	msgKey := fieldMap.resolve(FieldKeyMsg)
+	if m, ok := data[msgKey]; ok {
+		data["fields."+msgKey] = m
+		delete(data, msgKey)
+	}
+
+	levelKey := fieldMap.resolve(FieldKeyLevel)
+	if l, ok := data[levelKey]; ok {
+		data["fields."+levelKey] = l
+		delete(data, levelKey)
+	}
+
+	logrusErrKey := fieldMap.resolve(FieldKeyLogrusError)
+	if l, ok := data[logrusErrKey]; ok {
+		data["fields."+logrusErrKey] = l
+		delete(data, logrusErrKey)
+	}
+
+	// If reportCaller is not set, 'func' will not conflict.
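+	// Unlike the keys above, clashing 'func'/'file' values are copied to the
+	// prefixed keys but not deleted from data here.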
+ if reportCaller { + funcKey := fieldMap.resolve(FieldKeyFunc) + if l, ok := data[funcKey]; ok { + data["fields."+funcKey] = l + } + fileKey := fieldMap.resolve(FieldKeyFile) + if l, ok := data[fileKey]; ok { + data["fields."+fileKey] = l + } + } +} diff --git a/vendor/github.com/Sirupsen/logrus/go.mod b/vendor/github.com/Sirupsen/logrus/go.mod new file mode 100644 index 0000000..d413296 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/go.mod @@ -0,0 +1,11 @@ +module github.com/sirupsen/logrus + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/konsorten/go-windows-terminal-sequences v1.0.3 + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 + golang.org/x/sys v0.0.0-20190422165155-953cdadca894 +) + +go 1.13 diff --git a/vendor/github.com/Sirupsen/logrus/go.sum b/vendor/github.com/Sirupsen/logrus/go.sum new file mode 100644 index 0000000..49c690f --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/go.sum @@ -0,0 +1,12 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/Sirupsen/logrus/hooks.go new file mode 100644 index 0000000..3f151cd --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/hooks.go @@ -0,0 +1,34 @@ +package logrus + +// A hook to be fired when logging on the logging levels returned from +// `Levels()` on your implementation of the interface. Note that this is not +// fired in a goroutine or a channel with workers, you should handle such +// functionality yourself if your call is non-blocking and you don't wish for +// the logging calls for levels returned from `Levels()` to block. +type Hook interface { + Levels() []Level + Fire(*Entry) error +} + +// Internal type for storing the hooks on a logger instance. +type LevelHooks map[Level][]Hook + +// Add a hook to an instance of logger. This is called with +// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. +func (hooks LevelHooks) Add(hook Hook) { + for _, level := range hook.Levels() { + hooks[level] = append(hooks[level], hook) + } +} + +// Fire all the hooks for the passed level. Used by `entry.log` to fire +// appropriate hooks for a log entry. 
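+// Hooks run synchronously, in the order they were added; the first error
+// aborts the remaining hooks for this entry and is returned to the caller.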
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error { + for _, hook := range hooks[level] { + if err := hook.Fire(entry); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go new file mode 100644 index 0000000..ba7f237 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/json_formatter.go @@ -0,0 +1,125 @@ +package logrus + +import ( + "bytes" + "encoding/json" + "fmt" + "runtime" +) + +type fieldKey string + +// FieldMap allows customization of the key names for default fields. +type FieldMap map[fieldKey]string + +func (f FieldMap) resolve(key fieldKey) string { + if k, ok := f[key]; ok { + return k + } + + return string(key) +} + +// JSONFormatter formats logs into parsable json +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. + TimestampFormat string + + // DisableTimestamp allows disabling automatic timestamps in output + DisableTimestamp bool + + // DisableHTMLEscape allows disabling html escaping in output + DisableHTMLEscape bool + + // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key. + DataKey string + + // FieldMap allows users to customize the names of keys for default fields. + // As an example: + // formatter := &JSONFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message", + // FieldKeyFunc: "@caller", + // }, + // } + FieldMap FieldMap + + // CallerPrettyfier can be set by the user to modify the content + // of the function and file keys in the json data when ReportCaller is + // activated. If any of the returned value is the empty string the + // corresponding key will be removed from json fields. 
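+	// For example, returning ("", "") removes both the function and file keys
+	// from the output.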
+ CallerPrettyfier func(*runtime.Frame) (function string, file string) + + // PrettyPrint will indent all json logs + PrettyPrint bool +} + +// Format renders a single log entry +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+4) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + + if f.DataKey != "" { + newData := make(Fields, 4) + newData[f.DataKey] = data + data = newData + } + + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + + if entry.err != "" { + data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err + } + if !f.DisableTimestamp { + data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) + } + data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message + data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() + if entry.HasCaller() { + funcVal := entry.Caller.Function + fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } + if funcVal != "" { + data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal + } + if fileVal != "" { + data[f.FieldMap.resolve(FieldKeyFile)] = fileVal + } + } + + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + encoder := json.NewEncoder(b) + encoder.SetEscapeHTML(!f.DisableHTMLEscape) + if f.PrettyPrint { + encoder.SetIndent("", " ") + } + if err := encoder.Encode(data); err != nil { + return nil, fmt.Errorf("failed to marshal fields to JSON, %v", err) + } + + return b.Bytes(), nil +} diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go new file mode 100644 index 0000000..6fdda74 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/logger.go @@ -0,0 +1,352 @@ +package logrus + +import ( + "context" + "io" + "os" + "sync" + "sync/atomic" + "time" +) + +type Logger struct { + // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a + // file, or leave it default which is `os.Stderr`. You can also set this to + // something more adventurous, such as logging to Kafka. + Out io.Writer + // Hooks for the logger instance. These allow firing events based on logging + // levels and log entries. For example, to send errors to an error tracking + // service, log to StatsD or dump the core on fatal errors. + Hooks LevelHooks + // All log entries pass through the formatter before logged to Out. The + // included formatters are `TextFormatter` and `JSONFormatter` for which + // TextFormatter is the default. In development (when a TTY is attached) it + // logs with colors, but to a file it wouldn't. You can easily implement your + // own that implements the `Formatter` interface, see the `README` or included + // formatters for examples. + Formatter Formatter + + // Flag for whether to log caller info (off by default) + ReportCaller bool + + // The logging level the logger should log at. This is typically (and defaults + // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be + // logged. + Level Level + // Used to sync writing to the log. 
Locking is enabled by default.
+	mu MutexWrap
+	// Reusable empty entry
+	entryPool sync.Pool
+	// Function to exit the application, defaults to `os.Exit()`
+	ExitFunc exitFunc
+}
+
+type exitFunc func(int)
+
+type MutexWrap struct {
+	lock     sync.Mutex
+	disabled bool
+}
+
+func (mw *MutexWrap) Lock() {
+	if !mw.disabled {
+		mw.lock.Lock()
+	}
+}
+
+func (mw *MutexWrap) Unlock() {
+	if !mw.disabled {
+		mw.lock.Unlock()
+	}
+}
+
+func (mw *MutexWrap) Disable() {
+	mw.disabled = true
+}
+
+// New creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+//    var log = &logrus.Logger{
+//      Out: os.Stderr,
+//      Formatter: new(logrus.JSONFormatter),
+//      Hooks: make(logrus.LevelHooks),
+//      Level: logrus.DebugLevel,
+//    }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+	return &Logger{
+		Out:          os.Stderr,
+		Formatter:    new(TextFormatter),
+		Hooks:        make(LevelHooks),
+		Level:        InfoLevel,
+		ExitFunc:     os.Exit,
+		ReportCaller: false,
+	}
+}
+
+func (logger *Logger) newEntry() *Entry {
+	entry, ok := logger.entryPool.Get().(*Entry)
+	if ok {
+		return entry
+	}
+	return NewEntry(logger)
+}
+
+func (logger *Logger) releaseEntry(entry *Entry) {
+	entry.Data = map[string]interface{}{}
+	logger.entryPool.Put(entry)
+}
+
+// WithField allocates a new entry and adds a field to it.
+// Debug, Print, Info, Warn, Error, Fatal or Panic must then be applied to
+// this new returned entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithField(key, value)
+}
+
+// WithFields adds a struct of fields to the log entry. All it does is call
+// `WithField` for each `Field`.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithFields(fields)
+}
+
+// WithError adds an error as a single field to the log entry. All it does is
+// call `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithError(err)
+}
+
+// WithContext adds a context to the log entry.
+func (logger *Logger) WithContext(ctx context.Context) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithContext(ctx)
+}
+
+// WithTime overrides the time of the log entry.
+func (logger *Logger) WithTime(t time.Time) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithTime(t)
+}
+
+func (logger *Logger) Logf(level Level, format string, args ...interface{}) {
+	if logger.IsLevelEnabled(level) {
+		entry := logger.newEntry()
+		entry.Logf(level, format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Tracef(format string, args ...interface{}) {
+	logger.Logf(TraceLevel, format, args...)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+	logger.Logf(DebugLevel, format, args...)
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+	logger.Logf(InfoLevel, format, args...)
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Printf(format, args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+	logger.Logf(WarnLevel, format, args...)
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+	logger.Warnf(format, args...)
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+	logger.Logf(ErrorLevel, format, args...)
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+	logger.Logf(FatalLevel, format, args...)
+	logger.Exit(1)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+	logger.Logf(PanicLevel, format, args...)
+}
+
+func (logger *Logger) Log(level Level, args ...interface{}) {
+	if logger.IsLevelEnabled(level) {
+		entry := logger.newEntry()
+		entry.Log(level, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Trace(args ...interface{}) {
+	logger.Log(TraceLevel, args...)
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+	logger.Log(DebugLevel, args...)
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+	logger.Log(InfoLevel, args...)
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Print(args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+	logger.Log(WarnLevel, args...)
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+	logger.Warn(args...)
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+	logger.Log(ErrorLevel, args...)
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+	logger.Log(FatalLevel, args...)
+	logger.Exit(1)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+	logger.Log(PanicLevel, args...)
+}
+
+func (logger *Logger) Logln(level Level, args ...interface{}) {
+	if logger.IsLevelEnabled(level) {
+		entry := logger.newEntry()
+		entry.Logln(level, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Traceln(args ...interface{}) {
+	logger.Logln(TraceLevel, args...)
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+	logger.Logln(DebugLevel, args...)
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+	logger.Logln(InfoLevel, args...)
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Println(args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+	logger.Logln(WarnLevel, args...)
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+	logger.Warnln(args...)
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+	logger.Logln(ErrorLevel, args...)
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+	logger.Logln(FatalLevel, args...)
+	logger.Exit(1)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+	logger.Logln(PanicLevel, args...)
+}
+
+func (logger *Logger) Exit(code int) {
+	runHandlers()
+	if logger.ExitFunc == nil {
+		logger.ExitFunc = os.Exit
+	}
+	logger.ExitFunc(code)
+}
+
+// When a file is opened in append mode, it's safe to write to it
+// concurrently (for messages under 4k on Linux). In these cases the
+// user can choose to disable the lock.
+func (logger *Logger) SetNoLock() {
+	logger.mu.Disable()
+}
+
+func (logger *Logger) level() Level {
+	return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
+}
+
+// SetLevel sets the logger level.
+func (logger *Logger) SetLevel(level Level) {
+	atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
+}
+
+// GetLevel returns the logger level.
+func (logger *Logger) GetLevel() Level {
+	return logger.level()
+}
+
+// AddHook adds a hook to the logger hooks.
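+// As a hedged illustration, a minimal custom hook might look like this
+// (slackHook is a hypothetical type, not part of this package):
+//   type slackHook struct{}
+//   func (slackHook) Levels() []Level     { return []Level{ErrorLevel, FatalLevel} }
+//   func (slackHook) Fire(e *Entry) error { /* forward e.Message somewhere */ return nil }
+//   logger.AddHook(slackHook{})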
+func (logger *Logger) AddHook(hook Hook) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Hooks.Add(hook) +} + +// IsLevelEnabled checks if the log level of the logger is greater than the level param +func (logger *Logger) IsLevelEnabled(level Level) bool { + return logger.level() >= level +} + +// SetFormatter sets the logger formatter. +func (logger *Logger) SetFormatter(formatter Formatter) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Formatter = formatter +} + +// SetOutput sets the logger output. +func (logger *Logger) SetOutput(output io.Writer) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Out = output +} + +func (logger *Logger) SetReportCaller(reportCaller bool) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.ReportCaller = reportCaller +} + +// ReplaceHooks replaces the logger hooks and returns the old ones +func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { + logger.mu.Lock() + oldHooks := logger.Hooks + logger.Hooks = hooks + logger.mu.Unlock() + return oldHooks +} diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go new file mode 100644 index 0000000..2f16224 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/logrus.go @@ -0,0 +1,186 @@ +package logrus + +import ( + "fmt" + "log" + "strings" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Level type +type Level uint32 + +// Convert the Level to a string. E.g. PanicLevel becomes "panic". +func (level Level) String() string { + if b, err := level.MarshalText(); err == nil { + return string(b) + } else { + return "unknown" + } +} + +// ParseLevel takes a string level and returns the Logrus log level constant. +func ParseLevel(lvl string) (Level, error) { + switch strings.ToLower(lvl) { + case "panic": + return PanicLevel, nil + case "fatal": + return FatalLevel, nil + case "error": + return ErrorLevel, nil + case "warn", "warning": + return WarnLevel, nil + case "info": + return InfoLevel, nil + case "debug": + return DebugLevel, nil + case "trace": + return TraceLevel, nil + } + + var l Level + return l, fmt.Errorf("not a valid logrus Level: %q", lvl) +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (level *Level) UnmarshalText(text []byte) error { + l, err := ParseLevel(string(text)) + if err != nil { + return err + } + + *level = l + + return nil +} + +func (level Level) MarshalText() ([]byte, error) { + switch level { + case TraceLevel: + return []byte("trace"), nil + case DebugLevel: + return []byte("debug"), nil + case InfoLevel: + return []byte("info"), nil + case WarnLevel: + return []byte("warning"), nil + case ErrorLevel: + return []byte("error"), nil + case FatalLevel: + return []byte("fatal"), nil + case PanicLevel: + return []byte("panic"), nil + } + + return nil, fmt.Errorf("not a valid logrus level %d", level) +} + +// A constant exposing all logging levels +var AllLevels = []Level{ + PanicLevel, + FatalLevel, + ErrorLevel, + WarnLevel, + InfoLevel, + DebugLevel, + TraceLevel, +} + +// These are the different logging levels. You can set the logging level to log +// on your instance of logger, obtained with `logrus.New()`. +const ( + // PanicLevel level, highest level of severity. Logs and then calls panic with the + // message passed to Debug, Info, ... + PanicLevel Level = iota + // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the + // logging level is set to Panic. + FatalLevel + // ErrorLevel level. Logs. 
Used for errors that should definitely be noted.
+	// Commonly used for hooks to send errors to an error tracking service.
+	ErrorLevel
+	// WarnLevel level. Non-critical entries that deserve eyes.
+	WarnLevel
+	// InfoLevel level. General operational entries about what's going on inside the
+	// application.
+	InfoLevel
+	// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+	DebugLevel
+	// TraceLevel level. Designates finer-grained informational events than the Debug.
+	TraceLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var (
+	_ StdLogger = &log.Logger{}
+	_ StdLogger = &Entry{}
+	_ StdLogger = &Logger{}
+)
+
+// StdLogger is what your logrus-enabled library should take, that way
+// it'll accept a stdlib logger and a logrus logger. There's no standard
+// interface; this is the closest we get, unfortunately.
+type StdLogger interface {
+	Print(...interface{})
+	Printf(string, ...interface{})
+	Println(...interface{})
+
+	Fatal(...interface{})
+	Fatalf(string, ...interface{})
+	Fatalln(...interface{})
+
+	Panic(...interface{})
+	Panicf(string, ...interface{})
+	Panicln(...interface{})
+}
+
+// The FieldLogger interface generalizes the Entry and Logger types
+type FieldLogger interface {
+	WithField(key string, value interface{}) *Entry
+	WithFields(fields Fields) *Entry
+	WithError(err error) *Entry
+
+	Debugf(format string, args ...interface{})
+	Infof(format string, args ...interface{})
+	Printf(format string, args ...interface{})
+	Warnf(format string, args ...interface{})
+	Warningf(format string, args ...interface{})
+	Errorf(format string, args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Panicf(format string, args ...interface{})
+
+	Debug(args ...interface{})
+	Info(args ...interface{})
+	Print(args ...interface{})
+	Warn(args ...interface{})
+	Warning(args ...interface{})
+	Error(args ...interface{})
+	Fatal(args ...interface{})
+	Panic(args ...interface{})
+
+	Debugln(args ...interface{})
+	Infoln(args ...interface{})
+	Println(args ...interface{})
+	Warnln(args ...interface{})
+	Warningln(args ...interface{})
+	Errorln(args ...interface{})
+	Fatalln(args ...interface{})
+	Panicln(args ...interface{})
+
+	// IsDebugEnabled() bool
+	// IsInfoEnabled() bool
+	// IsWarnEnabled() bool
+	// IsErrorEnabled() bool
+	// IsFatalEnabled() bool
+	// IsPanicEnabled() bool
+}
+
+// Ext1FieldLogger (the first extension to FieldLogger) is superfluous; it is
+// here for consistency. Do not use. Use Logger or Entry instead.
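+//
+// As a hedged aside, library code would normally accept the base FieldLogger
+// so callers can pass either a *Logger or an *Entry (NewWorker is hypothetical):
+//   func NewWorker(log FieldLogger) { log.WithField("worker", 1).Info("started") }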
+type Ext1FieldLogger interface {
+	FieldLogger
+	Tracef(format string, args ...interface{})
+	Trace(args ...interface{})
+	Traceln(args ...interface{})
+}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go
new file mode 100644
index 0000000..2403de9
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go
@@ -0,0 +1,11 @@
+// +build appengine
+
+package logrus
+
+import (
+	"io"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	return true
+}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/Sirupsen/logrus/terminal_check_bsd.go
new file mode 100644
index 0000000..4997899
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_check_bsd.go
@@ -0,0 +1,13 @@
+// +build darwin dragonfly freebsd netbsd openbsd
+// +build !js
+
+package logrus
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TIOCGETA
+
+func isTerminal(fd int) bool {
+	_, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
+	return err == nil
+}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_js.go b/vendor/github.com/Sirupsen/logrus/terminal_check_js.go
new file mode 100644
index 0000000..ebdae3e
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_check_js.go
@@ -0,0 +1,7 @@
+// +build js
+
+package logrus
+
+func isTerminal(fd int) bool {
+	return false
+}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_no_terminal.go b/vendor/github.com/Sirupsen/logrus/terminal_check_no_terminal.go
new file mode 100644
index 0000000..97af92c
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_check_no_terminal.go
@@ -0,0 +1,11 @@
+// +build js nacl plan9
+
+package logrus
+
+import (
+	"io"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	return false
+}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go
new file mode 100644
index 0000000..3293fb3
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go
@@ -0,0 +1,17 @@
+// +build !appengine,!js,!windows,!nacl,!plan9
+
+package logrus
+
+import (
+	"io"
+	"os"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	switch v := w.(type) {
+	case *os.File:
+		return isTerminal(int(v.Fd()))
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_solaris.go b/vendor/github.com/Sirupsen/logrus/terminal_check_solaris.go
new file mode 100644
index 0000000..f6710b3
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_check_solaris.go
@@ -0,0 +1,11 @@
+package logrus
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+// isTerminal returns true if the given file descriptor is a terminal.
+func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermio(fd, unix.TCGETA) + return err == nil +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/Sirupsen/logrus/terminal_check_unix.go new file mode 100644 index 0000000..cc4fe6e --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_check_unix.go @@ -0,0 +1,13 @@ +// +build linux aix +// +build !js + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TCGETS + +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_check_windows.go new file mode 100644 index 0000000..572889d --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_check_windows.go @@ -0,0 +1,34 @@ +// +build !appengine,!js,windows + +package logrus + +import ( + "io" + "os" + "syscall" + + sequences "github.com/konsorten/go-windows-terminal-sequences" +) + +func initTerminal(w io.Writer) { + switch v := w.(type) { + case *os.File: + sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true) + } +} + +func checkIfTerminal(w io.Writer) bool { + var ret bool + switch v := w.(type) { + case *os.File: + var mode uint32 + err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode) + ret = (err == nil) + default: + ret = false + } + if ret { + initTerminal(w) + } + return ret +} diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go new file mode 100644 index 0000000..3c28b54 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go @@ -0,0 +1,334 @@ +package logrus + +import ( + "bytes" + "fmt" + "os" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "time" + "unicode/utf8" +) + +const ( + red = 31 + yellow = 33 + blue = 36 + gray = 37 +) + +var baseTimestamp time.Time + +func init() { + baseTimestamp = time.Now() +} + +// TextFormatter formats logs into text +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. + ForceColors bool + + // Force disabling colors. + DisableColors bool + + // Force quoting of all values + ForceQuote bool + + // DisableQuote disables quoting for all values. + // DisableQuote will have a lower priority than ForceQuote. + // If both of them are set to true, quote will be forced on all values. + DisableQuote bool + + // Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/ + EnvironmentOverrideColors bool + + // Disable timestamp logging. useful when output is redirected to logging + // system that already adds timestamps. + DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since beginning of execution. + FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. + DisableSorting bool + + // The keys sorting function, when uninitialized it uses sort.Strings. + SortingFunc func([]string) + + // Disables the truncation of the level text to 4 characters. 
+	DisableLevelTruncation bool
+
+	// PadLevelText adds padding to the level text so that all the levels output at the same length.
+	// PadLevelText is a superset of the DisableLevelTruncation option.
+	PadLevelText bool
+
+	// QuoteEmptyFields will wrap empty fields in quotes if true
+	QuoteEmptyFields bool
+
+	// Whether the logger's out is to a terminal
+	isTerminal bool
+
+	// FieldMap allows users to customize the names of keys for default fields.
+	// As an example:
+	// formatter := &TextFormatter{
+	//     FieldMap: FieldMap{
+	//         FieldKeyTime:  "@timestamp",
+	//         FieldKeyLevel: "@level",
+	//         FieldKeyMsg:   "@message"}}
+	FieldMap FieldMap
+
+	// CallerPrettyfier can be set by the user to modify the content
+	// of the function and file keys in the data when ReportCaller is
+	// activated. If any of the returned values is the empty string, the
+	// corresponding key will be removed from the fields.
+	CallerPrettyfier func(*runtime.Frame) (function string, file string)
+
+	terminalInitOnce sync.Once
+
+	// The max length of the level text, generated dynamically on init
+	levelTextMaxLength int
+}
+
+func (f *TextFormatter) init(entry *Entry) {
+	if entry.Logger != nil {
+		f.isTerminal = checkIfTerminal(entry.Logger.Out)
+	}
+	// Get the max length of the level text
+	for _, level := range AllLevels {
+		levelTextLength := utf8.RuneCount([]byte(level.String()))
+		if levelTextLength > f.levelTextMaxLength {
+			f.levelTextMaxLength = levelTextLength
+		}
+	}
+}
+
+func (f *TextFormatter) isColored() bool {
+	isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows"))
+
+	if f.EnvironmentOverrideColors {
+		switch force, ok := os.LookupEnv("CLICOLOR_FORCE"); {
+		case ok && force != "0":
+			isColored = true
+		case ok && force == "0", os.Getenv("CLICOLOR") == "0":
+			isColored = false
+		}
+	}
+
+	return isColored && !f.DisableColors
+}
+
+// Format renders a single log entry
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+	data := make(Fields)
+	for k, v := range entry.Data {
+		data[k] = v
+	}
+	prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
+	keys := make([]string, 0, len(data))
+	for k := range data {
+		keys = append(keys, k)
+	}
+
+	var funcVal, fileVal string
+
+	fixedKeys := make([]string, 0, 4+len(data))
+	if !f.DisableTimestamp {
+		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime))
+	}
+	fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel))
+	if entry.Message != "" {
+		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg))
+	}
+	if entry.err != "" {
+		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError))
+	}
+	if entry.HasCaller() {
+		if f.CallerPrettyfier != nil {
+			funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
+		} else {
+			funcVal = entry.Caller.Function
+			fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+		}
+
+		if funcVal != "" {
+			fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc))
+		}
+		if fileVal != "" {
+			fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile))
+		}
+	}
+
+	if !f.DisableSorting {
+		if f.SortingFunc == nil {
+			sort.Strings(keys)
+			fixedKeys = append(fixedKeys, keys...)
+		} else {
+			if !f.isColored() {
+				fixedKeys = append(fixedKeys, keys...)
+				f.SortingFunc(fixedKeys)
+			} else {
+				f.SortingFunc(keys)
+			}
+		}
+	} else {
+		fixedKeys = append(fixedKeys, keys...)
+ } + + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + f.terminalInitOnce.Do(func() { f.init(entry) }) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + if f.isColored() { + f.printColored(b, entry, keys, data, timestampFormat) + } else { + + for _, key := range fixedKeys { + var value interface{} + switch { + case key == f.FieldMap.resolve(FieldKeyTime): + value = entry.Time.Format(timestampFormat) + case key == f.FieldMap.resolve(FieldKeyLevel): + value = entry.Level.String() + case key == f.FieldMap.resolve(FieldKeyMsg): + value = entry.Message + case key == f.FieldMap.resolve(FieldKeyLogrusError): + value = entry.err + case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller(): + value = funcVal + case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller(): + value = fileVal + default: + value = data[key] + } + f.appendKeyValue(b, key, value) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) { + var levelColor int + switch entry.Level { + case DebugLevel, TraceLevel: + levelColor = gray + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String()) + if !f.DisableLevelTruncation && !f.PadLevelText { + levelText = levelText[0:4] + } + if f.PadLevelText { + // Generates the format string used in the next line, for example "%-6s" or "%-7s". + // Based on the max level text length. + formatString := "%-" + strconv.Itoa(f.levelTextMaxLength) + "s" + // Formats the level text by appending spaces up to the max length, for example: + // - "INFO " + // - "WARNING" + levelText = fmt.Sprintf(formatString, levelText) + } + + // Remove a single newline if it already exists in the message to keep + // the behavior of logrus text_formatter the same as the stdlib log package + entry.Message = strings.TrimSuffix(entry.Message, "\n") + + caller := "" + if entry.HasCaller() { + funcVal := fmt.Sprintf("%s()", entry.Caller.Function) + fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } + + if fileVal == "" { + caller = funcVal + } else if funcVal == "" { + caller = fileVal + } else { + caller = fileVal + " " + funcVal + } + } + + switch { + case f.DisableTimestamp: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message) + case !f.FullTimestamp: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message) + default: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message) + } + for _, k := range keys { + v := data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) + f.appendValue(b, v) + } +} + +func (f *TextFormatter) needsQuoting(text string) bool { + if f.ForceQuote { + return true + } + if f.QuoteEmptyFields && len(text) == 0 { + return true + } + if f.DisableQuote { + return false + } + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.' 
|| ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
+			return true
+		}
+	}
+	return false
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+	if b.Len() > 0 {
+		b.WriteByte(' ')
+	}
+	b.WriteString(key)
+	b.WriteByte('=')
+	f.appendValue(b, value)
+}
+
+func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
+	stringVal, ok := value.(string)
+	if !ok {
+		stringVal = fmt.Sprint(value)
+	}
+
+	if !f.needsQuoting(stringVal) {
+		b.WriteString(stringVal)
+	} else {
+		b.WriteString(fmt.Sprintf("%q", stringVal))
+	}
+}
diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go
new file mode 100644
index 0000000..72e8e3a
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/writer.go
@@ -0,0 +1,70 @@
+package logrus
+
+import (
+	"bufio"
+	"io"
+	"runtime"
+)
+
+// Writer returns a writer at INFO level. See WriterLevel for details.
+func (logger *Logger) Writer() *io.PipeWriter {
+	return logger.WriterLevel(InfoLevel)
+}
+
+// WriterLevel returns an io.Writer that can be used to write arbitrary text to
+// the logger at the given log level. Each line written to the writer will be
+// printed in the usual way using formatters and hooks. The writer is part of an
+// io.Pipe and it is the caller's responsibility to close the writer when done.
+// This can be used to override the standard library logger easily.
+func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
+	return NewEntry(logger).WriterLevel(level)
+}
+
+func (entry *Entry) Writer() *io.PipeWriter {
+	return entry.WriterLevel(InfoLevel)
+}
+
+func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
+	reader, writer := io.Pipe()
+
+	var printFunc func(args ...interface{})
+
+	switch level {
+	case TraceLevel:
+		printFunc = entry.Trace
+	case DebugLevel:
+		printFunc = entry.Debug
+	case InfoLevel:
+		printFunc = entry.Info
+	case WarnLevel:
+		printFunc = entry.Warn
+	case ErrorLevel:
+		printFunc = entry.Error
+	case FatalLevel:
+		printFunc = entry.Fatal
+	case PanicLevel:
+		printFunc = entry.Panic
+	default:
+		printFunc = entry.Print
+	}
+
+	go entry.writerScanner(reader, printFunc)
+	runtime.SetFinalizer(writer, writerFinalizer)
+
+	return writer
+}
+
+func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
+	scanner := bufio.NewScanner(reader)
+	for scanner.Scan() {
+		printFunc(scanner.Text())
+	}
+	if err := scanner.Err(); err != nil {
+		entry.Errorf("Error while reading from Writer: %s", err)
+	}
+	reader.Close()
+}
+
+func writerFinalizer(writer *io.PipeWriter) {
+	writer.Close()
+}
diff --git a/vendor/github.com/bradfitz/gomemcache/LICENSE b/vendor/github.com/bradfitz/gomemcache/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/bradfitz/gomemcache/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/bradfitz/gomemcache/memcache/memcache.go b/vendor/github.com/bradfitz/gomemcache/memcache/memcache.go
new file mode 100644
index 0000000..25e88ca
--- /dev/null
+++ b/vendor/github.com/bradfitz/gomemcache/memcache/memcache.go
@@ -0,0 +1,687 @@
+/*
+Copyright 2011 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package memcache provides a client for the memcached cache server.
+package memcache
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+)
+
+// Similar to:
+// https://godoc.org/google.golang.org/appengine/memcache
+
+var (
+	// ErrCacheMiss means that a Get failed because the item wasn't present.
+	ErrCacheMiss = errors.New("memcache: cache miss")
+
+	// ErrCASConflict means that a CompareAndSwap call failed due to the
+	// cached value being modified between the Get and the CompareAndSwap.
+	// If the cached value was simply evicted rather than replaced,
+	// ErrNotStored will be returned instead.
+	ErrCASConflict = errors.New("memcache: compare-and-swap conflict")
+
+	// ErrNotStored means that a conditional write operation (i.e. Add or
+	// CompareAndSwap) failed because the condition was not satisfied.
+	ErrNotStored = errors.New("memcache: item not stored")
+
+	// ErrServerError means that a server error occurred.
+	ErrServerError = errors.New("memcache: server error")
+
+	// ErrNoStats means that no statistics were available.
+	ErrNoStats = errors.New("memcache: no statistics available")
+
+	// ErrMalformedKey is returned when an invalid key is used.
+	// Keys must be at maximum 250 bytes long and not
+	// contain whitespace or control characters.
+	ErrMalformedKey = errors.New("malformed: key is too long or contains invalid characters")
+
+	// ErrNoServers is returned when no servers are configured or available.
+	ErrNoServers = errors.New("memcache: no servers configured or available")
+)
+
+const (
+	// DefaultTimeout is the default socket read/write timeout.
+	DefaultTimeout = 100 * time.Millisecond
+
+	// DefaultMaxIdleConns is the default maximum number of idle connections
+	// kept for any single address.
+	DefaultMaxIdleConns = 2
+)
+
+const buffered = 8 // arbitrary buffered channel size, for readability
+
+// resumableError returns true if err is only a protocol-level cache error.
+// This is used to determine whether a server connection should be reused.
+// If an error occurs, by default we don't reuse the connection, unless it
+// was just a cache error.
+func resumableError(err error) bool { + switch err { + case ErrCacheMiss, ErrCASConflict, ErrNotStored, ErrMalformedKey: + return true + } + return false +} + +func legalKey(key string) bool { + if len(key) > 250 { + return false + } + for i := 0; i < len(key); i++ { + if key[i] <= ' ' || key[i] == 0x7f { + return false + } + } + return true +} + +var ( + crlf = []byte("\r\n") + space = []byte(" ") + resultOK = []byte("OK\r\n") + resultStored = []byte("STORED\r\n") + resultNotStored = []byte("NOT_STORED\r\n") + resultExists = []byte("EXISTS\r\n") + resultNotFound = []byte("NOT_FOUND\r\n") + resultDeleted = []byte("DELETED\r\n") + resultEnd = []byte("END\r\n") + resultOk = []byte("OK\r\n") + resultTouched = []byte("TOUCHED\r\n") + + resultClientErrorPrefix = []byte("CLIENT_ERROR ") +) + +// New returns a memcache client using the provided server(s) +// with equal weight. If a server is listed multiple times, +// it gets a proportional amount of weight. +func New(server ...string) *Client { + ss := new(ServerList) + ss.SetServers(server...) + return NewFromSelector(ss) +} + +// NewFromSelector returns a new Client using the provided ServerSelector. +func NewFromSelector(ss ServerSelector) *Client { + return &Client{selector: ss} +} + +// Client is a memcache client. +// It is safe for unlocked use by multiple concurrent goroutines. +type Client struct { + // Timeout specifies the socket read/write timeout. + // If zero, DefaultTimeout is used. + Timeout time.Duration + + // MaxIdleConns specifies the maximum number of idle connections that will + // be maintained per address. If less than one, DefaultMaxIdleConns will be + // used. + // + // Consider your expected traffic rates and latency carefully. This should + // be set to a number higher than your peak parallel requests. + MaxIdleConns int + + selector ServerSelector + + lk sync.Mutex + freeconn map[string][]*conn +} + +// Item is an item to be got or stored in a memcached server. +type Item struct { + // Key is the Item's key (250 bytes maximum). + Key string + + // Value is the Item's value. + Value []byte + + // Flags are server-opaque flags whose semantics are entirely + // up to the app. + Flags uint32 + + // Expiration is the cache expiration time, in seconds: either a relative + // time from now (up to 1 month), or an absolute Unix epoch time. + // Zero means the Item has no expiration time. + Expiration int32 + + // Compare and swap ID. + casid uint64 +} + +// conn is a connection to a server. +type conn struct { + nc net.Conn + rw *bufio.ReadWriter + addr net.Addr + c *Client +} + +// release returns this connection back to the client's free pool +func (cn *conn) release() { + cn.c.putFreeConn(cn.addr, cn) +} + +func (cn *conn) extendDeadline() { + cn.nc.SetDeadline(time.Now().Add(cn.c.netTimeout())) +} + +// condRelease releases this connection if the error pointed to by err +// is nil (not an error) or is only a protocol level error (e.g. a +// cache miss). The purpose is to not recycle TCP connections that +// are bad. 
+func (cn *conn) condRelease(err *error) { + if *err == nil || resumableError(*err) { + cn.release() + } else { + cn.nc.Close() + } +} + +func (c *Client) putFreeConn(addr net.Addr, cn *conn) { + c.lk.Lock() + defer c.lk.Unlock() + if c.freeconn == nil { + c.freeconn = make(map[string][]*conn) + } + freelist := c.freeconn[addr.String()] + if len(freelist) >= c.maxIdleConns() { + cn.nc.Close() + return + } + c.freeconn[addr.String()] = append(freelist, cn) +} + +func (c *Client) getFreeConn(addr net.Addr) (cn *conn, ok bool) { + c.lk.Lock() + defer c.lk.Unlock() + if c.freeconn == nil { + return nil, false + } + freelist, ok := c.freeconn[addr.String()] + if !ok || len(freelist) == 0 { + return nil, false + } + cn = freelist[len(freelist)-1] + c.freeconn[addr.String()] = freelist[:len(freelist)-1] + return cn, true +} + +func (c *Client) netTimeout() time.Duration { + if c.Timeout != 0 { + return c.Timeout + } + return DefaultTimeout +} + +func (c *Client) maxIdleConns() int { + if c.MaxIdleConns > 0 { + return c.MaxIdleConns + } + return DefaultMaxIdleConns +} + +// ConnectTimeoutError is the error type used when it takes +// too long to connect to the desired host. This level of +// detail can generally be ignored. +type ConnectTimeoutError struct { + Addr net.Addr +} + +func (cte *ConnectTimeoutError) Error() string { + return "memcache: connect timeout to " + cte.Addr.String() +} + +func (c *Client) dial(addr net.Addr) (net.Conn, error) { + type connError struct { + cn net.Conn + err error + } + + nc, err := net.DialTimeout(addr.Network(), addr.String(), c.netTimeout()) + if err == nil { + return nc, nil + } + + if ne, ok := err.(net.Error); ok && ne.Timeout() { + return nil, &ConnectTimeoutError{addr} + } + + return nil, err +} + +func (c *Client) getConn(addr net.Addr) (*conn, error) { + cn, ok := c.getFreeConn(addr) + if ok { + cn.extendDeadline() + return cn, nil + } + nc, err := c.dial(addr) + if err != nil { + return nil, err + } + cn = &conn{ + nc: nc, + addr: addr, + rw: bufio.NewReadWriter(bufio.NewReader(nc), bufio.NewWriter(nc)), + c: c, + } + cn.extendDeadline() + return cn, nil +} + +func (c *Client) onItem(item *Item, fn func(*Client, *bufio.ReadWriter, *Item) error) error { + addr, err := c.selector.PickServer(item.Key) + if err != nil { + return err + } + cn, err := c.getConn(addr) + if err != nil { + return err + } + defer cn.condRelease(&err) + if err = fn(c, cn.rw, item); err != nil { + return err + } + return nil +} + +func (c *Client) FlushAll() error { + return c.selector.Each(c.flushAllFromAddr) +} + +// Get gets the item for the given key. ErrCacheMiss is returned for a +// memcache cache miss. The key must be at most 250 bytes in length. +func (c *Client) Get(key string) (item *Item, err error) { + err = c.withKeyAddr(key, func(addr net.Addr) error { + return c.getFromAddr(addr, []string{key}, func(it *Item) { item = it }) + }) + if err == nil && item == nil { + err = ErrCacheMiss + } + return +} + +// Touch updates the expiry for the given key. The seconds parameter is either +// a Unix timestamp or, if seconds is less than 1 month, the number of seconds +// into the future at which time the item will expire. Zero means the item has +// no expiration time. ErrCacheMiss is returned if the key is not in the cache. +// The key must be at most 250 bytes in length. 
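+//
+// A hedged usage sketch (the address and key are assumptions):
+//   c := New("127.0.0.1:11211")
+//   err := c.Touch("session:42", 300) // keep the item alive another 5 minutes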
+func (c *Client) Touch(key string, seconds int32) (err error) {
+	return c.withKeyAddr(key, func(addr net.Addr) error {
+		return c.touchFromAddr(addr, []string{key}, seconds)
+	})
+}
+
+func (c *Client) withKeyAddr(key string, fn func(net.Addr) error) (err error) {
+	if !legalKey(key) {
+		return ErrMalformedKey
+	}
+	addr, err := c.selector.PickServer(key)
+	if err != nil {
+		return err
+	}
+	return fn(addr)
+}
+
+func (c *Client) withAddrRw(addr net.Addr, fn func(*bufio.ReadWriter) error) (err error) {
+	cn, err := c.getConn(addr)
+	if err != nil {
+		return err
+	}
+	defer cn.condRelease(&err)
+	return fn(cn.rw)
+}
+
+func (c *Client) withKeyRw(key string, fn func(*bufio.ReadWriter) error) error {
+	return c.withKeyAddr(key, func(addr net.Addr) error {
+		return c.withAddrRw(addr, fn)
+	})
+}
+
+func (c *Client) getFromAddr(addr net.Addr, keys []string, cb func(*Item)) error {
+	return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
+		if _, err := fmt.Fprintf(rw, "gets %s\r\n", strings.Join(keys, " ")); err != nil {
+			return err
+		}
+		if err := rw.Flush(); err != nil {
+			return err
+		}
+		if err := parseGetResponse(rw.Reader, cb); err != nil {
+			return err
+		}
+		return nil
+	})
+}
+
+// flushAllFromAddr sends the flush_all command to the given addr
+func (c *Client) flushAllFromAddr(addr net.Addr) error {
+	return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
+		if _, err := fmt.Fprintf(rw, "flush_all\r\n"); err != nil {
+			return err
+		}
+		if err := rw.Flush(); err != nil {
+			return err
+		}
+		line, err := rw.ReadSlice('\n')
+		if err != nil {
+			return err
+		}
+		switch {
+		case bytes.Equal(line, resultOk):
+			break
+		default:
+			return fmt.Errorf("memcache: unexpected response line from flush_all: %q", string(line))
+		}
+		return nil
+	})
+}
+
+func (c *Client) touchFromAddr(addr net.Addr, keys []string, expiration int32) error {
+	return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
+		for _, key := range keys {
+			if _, err := fmt.Fprintf(rw, "touch %s %d\r\n", key, expiration); err != nil {
+				return err
+			}
+			if err := rw.Flush(); err != nil {
+				return err
+			}
+			line, err := rw.ReadSlice('\n')
+			if err != nil {
+				return err
+			}
+			switch {
+			case bytes.Equal(line, resultTouched):
+				break
+			case bytes.Equal(line, resultNotFound):
+				return ErrCacheMiss
+			default:
+				return fmt.Errorf("memcache: unexpected response line from touch: %q", string(line))
+			}
+		}
+		return nil
+	})
+}
+
+// GetMulti is a batch version of Get. The returned map from keys to
+// items may have fewer elements than the input slice, due to memcache
+// cache misses. Each key must be at most 250 bytes in length.
+// If no error is returned, the returned map will also be non-nil.
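+//
+// Illustrative sketch (keys are assumptions; errors elided for brevity):
+//   items, err := c.GetMulti([]string{"a", "b", "c"})
+//   for key, it := range items {
+//   	fmt.Printf("%s=%s\n", key, it.Value) // only keys that were hits appear
+//   }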
+func (c *Client) GetMulti(keys []string) (map[string]*Item, error) {
+	var lk sync.Mutex
+	m := make(map[string]*Item)
+	addItemToMap := func(it *Item) {
+		lk.Lock()
+		defer lk.Unlock()
+		m[it.Key] = it
+	}
+
+	keyMap := make(map[net.Addr][]string)
+	for _, key := range keys {
+		if !legalKey(key) {
+			return nil, ErrMalformedKey
+		}
+		addr, err := c.selector.PickServer(key)
+		if err != nil {
+			return nil, err
+		}
+		keyMap[addr] = append(keyMap[addr], key)
+	}
+
+	ch := make(chan error, buffered)
+	for addr, keys := range keyMap {
+		go func(addr net.Addr, keys []string) {
+			ch <- c.getFromAddr(addr, keys, addItemToMap)
+		}(addr, keys)
+	}
+
+	var err error
+	for range keyMap {
+		if ge := <-ch; ge != nil {
+			err = ge
+		}
+	}
+	return m, err
+}
+
+// parseGetResponse reads a GET response from r and calls cb for each
+// read and allocated Item
+func parseGetResponse(r *bufio.Reader, cb func(*Item)) error {
+	for {
+		line, err := r.ReadSlice('\n')
+		if err != nil {
+			return err
+		}
+		if bytes.Equal(line, resultEnd) {
+			return nil
+		}
+		it := new(Item)
+		size, err := scanGetResponseLine(line, it)
+		if err != nil {
+			return err
+		}
+		it.Value = make([]byte, size+2)
+		_, err = io.ReadFull(r, it.Value)
+		if err != nil {
+			it.Value = nil
+			return err
+		}
+		if !bytes.HasSuffix(it.Value, crlf) {
+			it.Value = nil
+			return fmt.Errorf("memcache: corrupt get result read")
+		}
+		it.Value = it.Value[:size]
+		cb(it)
+	}
+}
+
+// scanGetResponseLine populates it and returns the declared size of the item.
+// It does not read the bytes of the item.
+func scanGetResponseLine(line []byte, it *Item) (size int, err error) {
+	pattern := "VALUE %s %d %d %d\r\n"
+	dest := []interface{}{&it.Key, &it.Flags, &size, &it.casid}
+	if bytes.Count(line, space) == 3 {
+		pattern = "VALUE %s %d %d\r\n"
+		dest = dest[:3]
+	}
+	n, err := fmt.Sscanf(string(line), pattern, dest...)
+	if err != nil || n != len(dest) {
+		return -1, fmt.Errorf("memcache: unexpected line in get response: %q", line)
+	}
+	return size, nil
+}
+
+// Set writes the given item, unconditionally.
+func (c *Client) Set(item *Item) error {
+	return c.onItem(item, (*Client).set)
+}
+
+func (c *Client) set(rw *bufio.ReadWriter, item *Item) error {
+	return c.populateOne(rw, "set", item)
+}
+
+// Add writes the given item, if no value already exists for its
+// key. ErrNotStored is returned if that condition is not met.
+func (c *Client) Add(item *Item) error {
+	return c.onItem(item, (*Client).add)
+}
+
+func (c *Client) add(rw *bufio.ReadWriter, item *Item) error {
+	return c.populateOne(rw, "add", item)
+}
+
+// Replace writes the given item, but only if the server *does*
+// already hold data for this key.
+func (c *Client) Replace(item *Item) error {
+	return c.onItem(item, (*Client).replace)
+}
+
+func (c *Client) replace(rw *bufio.ReadWriter, item *Item) error {
+	return c.populateOne(rw, "replace", item)
+}
+
+// CompareAndSwap writes the given item that was previously returned
+// by Get, if the value was neither modified nor evicted between the
+// Get and the CompareAndSwap calls. The item's Key should not change
+// between calls but all other item fields may differ. ErrCASConflict
+// is returned if the value was modified in between the
+// calls. ErrNotStored is returned if the value was evicted in between
+// the calls.
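+//
+// A hedged read-modify-write sketch (the key is an assumption):
+//   it, err := c.Get("note")
+//   if err == nil {
+//   	it.Value = append(it.Value, '!')
+//   	err = c.CompareAndSwap(it) // ErrCASConflict if someone else wrote first
+//   }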
+func (c *Client) CompareAndSwap(item *Item) error {
+	return c.onItem(item, (*Client).cas)
+}
+
+func (c *Client) cas(rw *bufio.ReadWriter, item *Item) error {
+	return c.populateOne(rw, "cas", item)
+}
+
+func (c *Client) populateOne(rw *bufio.ReadWriter, verb string, item *Item) error {
+	if !legalKey(item.Key) {
+		return ErrMalformedKey
+	}
+	var err error
+	if verb == "cas" {
+		_, err = fmt.Fprintf(rw, "%s %s %d %d %d %d\r\n",
+			verb, item.Key, item.Flags, item.Expiration, len(item.Value), item.casid)
+	} else {
+		_, err = fmt.Fprintf(rw, "%s %s %d %d %d\r\n",
+			verb, item.Key, item.Flags, item.Expiration, len(item.Value))
+	}
+	if err != nil {
+		return err
+	}
+	if _, err = rw.Write(item.Value); err != nil {
+		return err
+	}
+	if _, err := rw.Write(crlf); err != nil {
+		return err
+	}
+	if err := rw.Flush(); err != nil {
+		return err
+	}
+	line, err := rw.ReadSlice('\n')
+	if err != nil {
+		return err
+	}
+	switch {
+	case bytes.Equal(line, resultStored):
+		return nil
+	case bytes.Equal(line, resultNotStored):
+		return ErrNotStored
+	case bytes.Equal(line, resultExists):
+		return ErrCASConflict
+	case bytes.Equal(line, resultNotFound):
+		return ErrCacheMiss
+	}
+	return fmt.Errorf("memcache: unexpected response line from %q: %q", verb, string(line))
+}
+
+func writeReadLine(rw *bufio.ReadWriter, format string, args ...interface{}) ([]byte, error) {
+	_, err := fmt.Fprintf(rw, format, args...)
+	if err != nil {
+		return nil, err
+	}
+	if err := rw.Flush(); err != nil {
+		return nil, err
+	}
+	line, err := rw.ReadSlice('\n')
+	return line, err
+}
+
+func writeExpectf(rw *bufio.ReadWriter, expect []byte, format string, args ...interface{}) error {
+	line, err := writeReadLine(rw, format, args...)
+	if err != nil {
+		return err
+	}
+	switch {
+	case bytes.Equal(line, resultOK):
+		return nil
+	case bytes.Equal(line, expect):
+		return nil
+	case bytes.Equal(line, resultNotStored):
+		return ErrNotStored
+	case bytes.Equal(line, resultExists):
+		return ErrCASConflict
+	case bytes.Equal(line, resultNotFound):
+		return ErrCacheMiss
+	}
+	return fmt.Errorf("memcache: unexpected response line: %q", string(line))
+}
+
+// Delete deletes the item with the provided key. The error ErrCacheMiss is
+// returned if the item didn't already exist in the cache.
+func (c *Client) Delete(key string) error {
+	return c.withKeyRw(key, func(rw *bufio.ReadWriter) error {
+		return writeExpectf(rw, resultDeleted, "delete %s\r\n", key)
+	})
+}
+
+// DeleteAll deletes all items in the cache.
+func (c *Client) DeleteAll() error {
+	return c.withKeyRw("", func(rw *bufio.ReadWriter) error {
+		return writeExpectf(rw, resultDeleted, "flush_all\r\n")
+	})
+}
+
+// Increment atomically increments key by delta. The return value is
+// the new value after being incremented or an error. If the value
+// didn't exist in memcached the error is ErrCacheMiss. The value in
+// memcached must be a decimal number, or an error will be returned.
+// On 64-bit overflow, the new value wraps around.
+func (c *Client) Increment(key string, delta uint64) (newValue uint64, err error) {
+	return c.incrDecr("incr", key, delta)
+}
+
+// Decrement atomically decrements key by delta. The return value is
+// the new value after being decremented or an error. If the value
+// didn't exist in memcached the error is ErrCacheMiss. The value in
+// memcached must be a decimal number, or an error will be returned.
+// On underflow, the new value is capped at zero and does not wrap
+// around.
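+//
+// Sketch of the decimal-value requirement (key and numbers are assumptions):
+//   c.Set(&Item{Key: "hits", Value: []byte("10")})
+//   n, err := c.Decrement("hits", 3) // n == 7 on success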
+func (c *Client) Decrement(key string, delta uint64) (newValue uint64, err error) {
+	return c.incrDecr("decr", key, delta)
+}
+
+func (c *Client) incrDecr(verb, key string, delta uint64) (uint64, error) {
+	var val uint64
+	err := c.withKeyRw(key, func(rw *bufio.ReadWriter) error {
+		line, err := writeReadLine(rw, "%s %s %d\r\n", verb, key, delta)
+		if err != nil {
+			return err
+		}
+		switch {
+		case bytes.Equal(line, resultNotFound):
+			return ErrCacheMiss
+		case bytes.HasPrefix(line, resultClientErrorPrefix):
+			errMsg := line[len(resultClientErrorPrefix) : len(line)-2]
+			return errors.New("memcache: client error: " + string(errMsg))
+		}
+		val, err = strconv.ParseUint(string(line[:len(line)-2]), 10, 64)
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+	return val, err
+}
diff --git a/vendor/github.com/bradfitz/gomemcache/memcache/selector.go b/vendor/github.com/bradfitz/gomemcache/memcache/selector.go
new file mode 100644
index 0000000..89ad81e
--- /dev/null
+++ b/vendor/github.com/bradfitz/gomemcache/memcache/selector.go
@@ -0,0 +1,129 @@
+/*
+Copyright 2011 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package memcache
+
+import (
+	"hash/crc32"
+	"net"
+	"strings"
+	"sync"
+)
+
+// ServerSelector is the interface that selects a memcache server
+// as a function of the item's key.
+//
+// All ServerSelector implementations must be safe for concurrent use
+// by multiple goroutines.
+type ServerSelector interface {
+	// PickServer returns the server address that a given item
+	// should be sharded onto.
+	PickServer(key string) (net.Addr, error)
+	Each(func(net.Addr) error) error
+}
+
+// ServerList is a simple ServerSelector. Its zero value is usable.
+type ServerList struct {
+	mu    sync.RWMutex
+	addrs []net.Addr
+}
+
+// staticAddr caches the Network() and String() values from any net.Addr.
+type staticAddr struct {
+	ntw, str string
+}
+
+func newStaticAddr(a net.Addr) net.Addr {
+	return &staticAddr{
+		ntw: a.Network(),
+		str: a.String(),
+	}
+}
+
+func (s *staticAddr) Network() string { return s.ntw }
+func (s *staticAddr) String() string  { return s.str }
+
+// SetServers changes a ServerList's set of servers at runtime and is
+// safe for concurrent use by multiple goroutines.
+//
+// Each server is given equal weight. A server is given more weight
+// if it's listed multiple times.
++// SetServers returns an error if any of the server names fail to
+// resolve. No attempt is made to connect to the server. If any error
+// is returned, no changes are made to the ServerList.
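+//
+// For example, an illustrative sketch:
+//
+//	ss := new(ServerList)
+//	if err := ss.SetServers("10.0.0.1:11211", "10.0.0.2:11211"); err != nil {
+//		// a name failed to resolve; the previous server set is untouched
+//	}
+//	addr, _ := ss.PickServer("some-key") // stable for a given key and server set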
+func (ss *ServerList) SetServers(servers ...string) error { + naddr := make([]net.Addr, len(servers)) + for i, server := range servers { + if strings.Contains(server, "/") { + addr, err := net.ResolveUnixAddr("unix", server) + if err != nil { + return err + } + naddr[i] = newStaticAddr(addr) + } else { + tcpaddr, err := net.ResolveTCPAddr("tcp", server) + if err != nil { + return err + } + naddr[i] = newStaticAddr(tcpaddr) + } + } + + ss.mu.Lock() + defer ss.mu.Unlock() + ss.addrs = naddr + return nil +} + +// Each iterates over each server calling the given function +func (ss *ServerList) Each(f func(net.Addr) error) error { + ss.mu.RLock() + defer ss.mu.RUnlock() + for _, a := range ss.addrs { + if err := f(a); nil != err { + return err + } + } + return nil +} + +// keyBufPool returns []byte buffers for use by PickServer's call to +// crc32.ChecksumIEEE to avoid allocations. (but doesn't avoid the +// copies, which at least are bounded in size and small) +var keyBufPool = sync.Pool{ + New: func() interface{} { + b := make([]byte, 256) + return &b + }, +} + +func (ss *ServerList) PickServer(key string) (net.Addr, error) { + ss.mu.RLock() + defer ss.mu.RUnlock() + if len(ss.addrs) == 0 { + return nil, ErrNoServers + } + if len(ss.addrs) == 1 { + return ss.addrs[0], nil + } + bufp := keyBufPool.Get().(*[]byte) + n := copy(*bufp, key) + cs := crc32.ChecksumIEEE((*bufp)[:n]) + keyBufPool.Put(bufp) + + return ss.addrs[cs%uint32(len(ss.addrs))], nil +} diff --git a/vendor/github.com/go-ini/ini/.gitignore b/vendor/github.com/go-ini/ini/.gitignore new file mode 100644 index 0000000..1241112 --- /dev/null +++ b/vendor/github.com/go-ini/ini/.gitignore @@ -0,0 +1,6 @@ +testdata/conf_out.ini +ini.sublime-project +ini.sublime-workspace +testdata/conf_reflect.ini +.idea +/.vscode diff --git a/vendor/github.com/go-ini/ini/.travis.yml b/vendor/github.com/go-ini/ini/.travis.yml new file mode 100644 index 0000000..08682ef --- /dev/null +++ b/vendor/github.com/go-ini/ini/.travis.yml @@ -0,0 +1,19 @@ +sudo: false +language: go +go: + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + +install: skip +script: + - go get golang.org/x/tools/cmd/cover + - go get github.com/smartystreets/goconvey + - mkdir -p $HOME/gopath/src/gopkg.in + - ln -s $HOME/gopath/src/github.com/go-ini/ini $HOME/gopath/src/gopkg.in/ini.v1 + - cd $HOME/gopath/src/gopkg.in/ini.v1 + - go test -v -cover -race diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE new file mode 100644 index 0000000..d361bbc --- /dev/null +++ b/vendor/github.com/go-ini/ini/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. 
+ +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. 
If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright 2014 Unknwon + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile new file mode 100644 index 0000000..af27ff0 --- /dev/null +++ b/vendor/github.com/go-ini/ini/Makefile @@ -0,0 +1,15 @@ +.PHONY: build test bench vet coverage + +build: vet bench + +test: + go test -v -cover -race + +bench: + go test -v -cover -race -test.bench=. 
-test.benchmem
+
+vet:
+	go vet
+
+coverage:
+	go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out
diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md
new file mode 100644
index 0000000..036c56d
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/README.md
@@ -0,0 +1,54 @@
+INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://img.shields.io/badge/view%20on-Sourcegraph-brightgreen.svg)](https://sourcegraph.com/github.com/go-ini/ini)
+===
+
+![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
+
+Package ini provides INI file read and write functionality in Go.
+
+## Features
+
+- Load from multiple data sources (`[]byte`, file and `io.ReadCloser`) with overwrites.
+- Read with recursive values.
+- Read with parent-child sections.
+- Read with auto-increment key names.
+- Read with multiple-line values.
+- Read with tons of helper methods.
+- Read and convert values to Go types.
+- Read and **WRITE** comments of sections and keys.
+- Manipulate sections, keys and comments with ease.
+- Keep sections and keys in order as you parse and save.
+
+## Installation
+
+The minimum required version of Go is **1.6**.
+
+```sh
+$ go get gopkg.in/ini.v1
+```
+
+Add the `-u` flag to update the package in the future.
+
+## Go Modules
+
+For historical reasons, people use two different import paths for this package: `github.com/go-ini/ini` and `gopkg.in/ini.v1`. If you get an error similar to the following one:
+
+```
+go: finding github.com/go-ini/ini v0.0.0-00010101000000-000000000000
+go: github.com/go-ini/ini@v0.0.0-00010101000000-000000000000: unknown revision 000000000000
+go: error loading module requirements
+```
+
+it is because one of your dependencies is using the deprecated import path `github.com/go-ini/ini`. You can make a quick fix by adding the following line to your `go.mod` file (`v1.44.0` was the latest version tagged on the `master` branch):
+
+```
+replace github.com/go-ini/ini => gopkg.in/ini.v1 v1.44.0
+```
+
+## Getting Help
+
+- [Getting Started](https://ini.unknwon.io/docs/intro/getting_started)
+- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
+
+## License
+
+This project is under the Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go
new file mode 100644
index 0000000..d88347c
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/error.go
@@ -0,0 +1,34 @@
+// Copyright 2016 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+	"fmt"
+)
+
+// ErrDelimiterNotFound indicates that no key-value delimiter was found on a
+// line where one was expected.
+type ErrDelimiterNotFound struct {
+	Line string
+}
+
+// IsErrDelimiterNotFound returns true if the given error is an instance of ErrDelimiterNotFound.
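+//
+// For example, an illustrative sketch:
+//
+//	_, err := Load([]byte("a line without any delimiter"))
+//	if IsErrDelimiterNotFound(err) {
+//		// the source contained a line with no "=" or ":" separator
+//	}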
+func IsErrDelimiterNotFound(err error) bool {
+	_, ok := err.(ErrDelimiterNotFound)
+	return ok
+}
+
+func (err ErrDelimiterNotFound) Error() string {
+	return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
+}
diff --git a/vendor/github.com/go-ini/ini/file.go b/vendor/github.com/go-ini/ini/file.go
new file mode 100644
index 0000000..b38aadd
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/file.go
@@ -0,0 +1,418 @@
+// Copyright 2017 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"strings"
+	"sync"
+)
+
+// File represents a combination of one or more INI files in memory.
+type File struct {
+	options     LoadOptions
+	dataSources []dataSource
+
+	// Should make things safe, but sometimes doesn't matter.
+	BlockMode bool
+	lock      sync.RWMutex
+
+	// To keep data in order.
+	sectionList []string
+	// Actual data is stored here.
+	sections map[string]*Section
+
+	NameMapper
+	ValueMapper
+}
+
+// newFile initializes File object with given data sources.
+func newFile(dataSources []dataSource, opts LoadOptions) *File {
+	if len(opts.KeyValueDelimiters) == 0 {
+		opts.KeyValueDelimiters = "=:"
+	}
+	return &File{
+		BlockMode:   true,
+		dataSources: dataSources,
+		sections:    make(map[string]*Section),
+		sectionList: make([]string, 0, 10),
+		options:     opts,
+	}
+}
+
+// Empty returns an empty file object.
+func Empty() *File {
+	// Ignore error here, we're sure our data is good.
+	f, _ := Load([]byte(""))
+	return f
+}
+
+// NewSection creates a new section.
+func (f *File) NewSection(name string) (*Section, error) {
+	if len(name) == 0 {
+		return nil, errors.New("error creating new section: empty section name")
+	} else if f.options.Insensitive && name != DefaultSection {
+		name = strings.ToLower(name)
+	}
+
+	if f.BlockMode {
+		f.lock.Lock()
+		defer f.lock.Unlock()
+	}
+
+	if inSlice(name, f.sectionList) {
+		return f.sections[name], nil
+	}
+
+	f.sectionList = append(f.sectionList, name)
+	f.sections[name] = newSection(f, name)
+	return f.sections[name], nil
+}
+
+// NewRawSection creates a new section with an unparseable body.
+func (f *File) NewRawSection(name, body string) (*Section, error) {
+	section, err := f.NewSection(name)
+	if err != nil {
+		return nil, err
+	}
+
+	section.isRawSection = true
+	section.rawBody = body
+	return section, nil
+}
+
+// NewSections creates a list of sections.
+func (f *File) NewSections(names ...string) (err error) {
+	for _, name := range names {
+		if _, err = f.NewSection(name); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// GetSection returns the section by given name.
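+//
+// For example, an illustrative sketch (cfg is an assumed *File from Load):
+//
+//	sec, err := cfg.GetSection("database")
+//	if err != nil {
+//		// the section does not exist; Section("database") would create it instead
+//	}
+//	host := sec.Key("host").String()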
+func (f *File) GetSection(name string) (*Section, error) { + if len(name) == 0 { + name = DefaultSection + } + if f.options.Insensitive { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + sec := f.sections[name] + if sec == nil { + return nil, fmt.Errorf("section '%s' does not exist", name) + } + return sec, nil +} + +// Section assumes named section exists and returns a zero-value when not. +func (f *File) Section(name string) *Section { + sec, err := f.GetSection(name) + if err != nil { + // Note: It's OK here because the only possible error is empty section name, + // but if it's empty, this piece of code won't be executed. + sec, _ = f.NewSection(name) + return sec + } + return sec +} + +// Sections returns a list of Section stored in the current instance. +func (f *File) Sections() []*Section { + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + sections := make([]*Section, len(f.sectionList)) + for i, name := range f.sectionList { + sections[i] = f.sections[name] + } + return sections +} + +// ChildSections returns a list of child sections of given section name. +func (f *File) ChildSections(name string) []*Section { + return f.Section(name).ChildSections() +} + +// SectionStrings returns list of section names. +func (f *File) SectionStrings() []string { + list := make([]string, len(f.sectionList)) + copy(list, f.sectionList) + return list +} + +// DeleteSection deletes a section. +func (f *File) DeleteSection(name string) { + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if len(name) == 0 { + name = DefaultSection + } + + for i, s := range f.sectionList { + if s == name { + f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) + delete(f.sections, name) + return + } + } +} + +func (f *File) reload(s dataSource) error { + r, err := s.ReadCloser() + if err != nil { + return err + } + defer r.Close() + + return f.parse(r) +} + +// Reload reloads and parses all data sources. +func (f *File) Reload() (err error) { + for _, s := range f.dataSources { + if err = f.reload(s); err != nil { + // In loose mode, we create an empty default section for nonexistent files. + if os.IsNotExist(err) && f.options.Loose { + f.parse(bytes.NewBuffer(nil)) + continue + } + return err + } + } + return nil +} + +// Append appends one or more data sources and reloads automatically. +func (f *File) Append(source interface{}, others ...interface{}) error { + ds, err := parseDataSource(source) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + for _, s := range others { + ds, err = parseDataSource(s) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + } + return f.Reload() +} + +func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { + equalSign := DefaultFormatLeft + "=" + DefaultFormatRight + + if PrettyFormat || PrettyEqual { + equalSign = " = " + } + + // Use buffer to make sure target is safe until finish encoding. 
+ buf := bytes.NewBuffer(nil) + for i, sname := range f.sectionList { + sec := f.Section(sname) + if len(sec.Comment) > 0 { + // Support multiline comments + lines := strings.Split(sec.Comment, LineBreak) + for i := range lines { + if lines[i][0] != '#' && lines[i][0] != ';' { + lines[i] = "; " + lines[i] + } else { + lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) + } + + if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { + return nil, err + } + } + } + + if i > 0 || DefaultHeader { + if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil { + return nil, err + } + } else { + // Write nothing if default section is empty + if len(sec.keyList) == 0 { + continue + } + } + + if sec.isRawSection { + if _, err := buf.WriteString(sec.rawBody); err != nil { + return nil, err + } + + if PrettySection { + // Put a line between sections + if _, err := buf.WriteString(LineBreak); err != nil { + return nil, err + } + } + continue + } + + // Count and generate alignment length and buffer spaces using the + // longest key. Keys may be modifed if they contain certain characters so + // we need to take that into account in our calculation. + alignLength := 0 + if PrettyFormat { + for _, kname := range sec.keyList { + keyLength := len(kname) + // First case will surround key by ` and second by """ + if strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters) { + keyLength += 2 + } else if strings.Contains(kname, "`") { + keyLength += 6 + } + + if keyLength > alignLength { + alignLength = keyLength + } + } + } + alignSpaces := bytes.Repeat([]byte(" "), alignLength) + + KEY_LIST: + for _, kname := range sec.keyList { + key := sec.Key(kname) + if len(key.Comment) > 0 { + if len(indent) > 0 && sname != DefaultSection { + buf.WriteString(indent) + } + + // Support multiline comments + lines := strings.Split(key.Comment, LineBreak) + for i := range lines { + if lines[i][0] != '#' && lines[i][0] != ';' { + lines[i] = "; " + strings.TrimSpace(lines[i]) + } else { + lines[i] = lines[i][:1] + " " + strings.TrimSpace(lines[i][1:]) + } + + if _, err := buf.WriteString(lines[i] + LineBreak); err != nil { + return nil, err + } + } + } + + if len(indent) > 0 && sname != DefaultSection { + buf.WriteString(indent) + } + + switch { + case key.isAutoIncrement: + kname = "-" + case strings.Contains(kname, "\"") || strings.ContainsAny(kname, f.options.KeyValueDelimiters): + kname = "`" + kname + "`" + case strings.Contains(kname, "`"): + kname = `"""` + kname + `"""` + } + + for _, val := range key.ValueWithShadows() { + if _, err := buf.WriteString(kname); err != nil { + return nil, err + } + + if key.isBooleanType { + if kname != sec.keyList[len(sec.keyList)-1] { + buf.WriteString(LineBreak) + } + continue KEY_LIST + } + + // Write out alignment spaces before "=" sign + if PrettyFormat { + buf.Write(alignSpaces[:alignLength-len(kname)]) + } + + // In case key value contains "\n", "`", "\"", "#" or ";" + if strings.ContainsAny(val, "\n`") { + val = `"""` + val + `"""` + } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") { + val = "`" + val + "`" + } + if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil { + return nil, err + } + } + + for _, val := range key.nestedValues { + if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil { + return nil, err + } + } + } + + if PrettySection { + // Put a line between sections + if _, err := buf.WriteString(LineBreak); err != nil { + return nil, err + 
}
+		}
+	}
+
+	return buf, nil
+}
+
+// WriteToIndent writes content into io.Writer with the given indentation.
+// If PrettyFormat has been set to true,
+// it will align the "=" sign with spaces under each section.
+func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) {
+	buf, err := f.writeToBuffer(indent)
+	if err != nil {
+		return 0, err
+	}
+	return buf.WriteTo(w)
+}
+
+// WriteTo writes file content into io.Writer.
+func (f *File) WriteTo(w io.Writer) (int64, error) {
+	return f.WriteToIndent(w, "")
+}
+
+// SaveToIndent writes content to the file system with the given value indentation.
+func (f *File) SaveToIndent(filename, indent string) error {
+	// Note: Because we are truncating with os.Create, it's safer to save to
+	// a temporary file location and rename after done.
+	buf, err := f.writeToBuffer(indent)
+	if err != nil {
+		return err
+	}
+
+	return ioutil.WriteFile(filename, buf.Bytes(), 0666)
+}
+
+// SaveTo writes content to the file system.
+func (f *File) SaveTo(filename string) error {
+	return f.SaveToIndent(filename, "")
+}
diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go
new file mode 100644
index 0000000..36c072c
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/ini.go
@@ -0,0 +1,223 @@
+// +build go1.6
+
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package ini provides INI file read and write functionality in Go.
+package ini
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"regexp"
+	"runtime"
+)
+
+const (
+	// DefaultSection is the name of default section. You can use this constant or the string literal.
+	// In most cases, an empty string is all you need to access the section.
+	DefaultSection = "DEFAULT"
+	// Deprecated: Use "DefaultSection" instead.
+	DEFAULT_SECTION = DefaultSection
+
+	// Maximum allowed depth when recursively substituting variable names.
+	depthValues = 99
+	version     = "1.46.0"
+)
+
+// Version returns current package version literal.
+func Version() string {
+	return version
+}
+
+var (
+	// LineBreak is the delimiter to determine or compose a new line.
+	// This variable will be changed to "\r\n" automatically on Windows at package init time.
+	LineBreak = "\n"
+
+	// DefaultFormatLeft places custom spaces on the left when PrettyFormat and PrettyEqual are both disabled.
+	DefaultFormatLeft = ""
+	// DefaultFormatRight places custom spaces on the right when PrettyFormat and PrettyEqual are both disabled.
+	DefaultFormatRight = ""
+
+	// Variable regexp pattern: %(variable)s
+	varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
+
+	// PrettyFormat indicates whether to align "=" sign with spaces to produce pretty output
+	// or reduce all possible spaces for compact format.
+	PrettyFormat = true
+
+	// PrettyEqual places spaces around "=" sign even when PrettyFormat is false.
+	PrettyEqual = false
+
+	// DefaultHeader explicitly writes default section header.
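+	//
+	// For example, an illustrative sketch: with DefaultHeader set to true,
+	// WriteTo emits "[DEFAULT]" before the keys of the default section
+	// instead of writing them without a header:
+	//
+	//	DefaultHeader = true
+	//	f := Empty()
+	//	f.Section("").Key("answer").SetValue("42")
+	//	f.WriteTo(os.Stdout) // prints "[DEFAULT]" followed by "answer = 42"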
+	DefaultHeader = false
+
+	// PrettySection indicates whether to put a line between sections.
+	PrettySection = true
+)
+
+func init() {
+	if runtime.GOOS == "windows" {
+		LineBreak = "\r\n"
+	}
+}
+
+func inSlice(str string, s []string) bool {
+	for _, v := range s {
+		if str == v {
+			return true
+		}
+	}
+	return false
+}
+
+// dataSource is an interface that returns an object which can be read and closed.
+type dataSource interface {
+	ReadCloser() (io.ReadCloser, error)
+}
+
+// sourceFile represents an object that contains content on the local file system.
+type sourceFile struct {
+	name string
+}
+
+func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
+	return os.Open(s.name)
+}
+
+// sourceData represents an object that contains content in memory.
+type sourceData struct {
+	data []byte
+}
+
+func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
+	return ioutil.NopCloser(bytes.NewReader(s.data)), nil
+}
+
+// sourceReadCloser represents an input stream with a Close method.
+type sourceReadCloser struct {
+	reader io.ReadCloser
+}
+
+func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
+	return s.reader, nil
+}
+
+func parseDataSource(source interface{}) (dataSource, error) {
+	switch s := source.(type) {
+	case string:
+		return sourceFile{s}, nil
+	case []byte:
+		return &sourceData{s}, nil
+	case io.ReadCloser:
+		return &sourceReadCloser{s}, nil
+	default:
+		return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s)
+	}
+}
+
+// LoadOptions contains all customized options used for loading data source(s).
+type LoadOptions struct {
+	// Loose indicates whether the parser should ignore nonexistent files or return an error.
+	Loose bool
+	// Insensitive indicates whether the parser forces all section and key names to lowercase.
+	Insensitive bool
+	// IgnoreContinuation indicates whether to ignore continuation lines while parsing.
+	IgnoreContinuation bool
+	// IgnoreInlineComment indicates whether to ignore comments at the end of a value and treat them as part of the value.
+	IgnoreInlineComment bool
+	// SkipUnrecognizableLines indicates whether to skip unrecognizable lines that do not conform to key/value pairs.
+	SkipUnrecognizableLines bool
+	// AllowBooleanKeys indicates whether to allow boolean type keys or treat them as keys with a missing value.
+	// This type of key is mostly used in my.cnf.
+	AllowBooleanKeys bool
+	// AllowShadows indicates whether to keep track of keys with the same name under the same section.
+	AllowShadows bool
+	// AllowNestedValues indicates whether to allow AWS-like nested values.
+	// Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values
+	AllowNestedValues bool
+	// AllowPythonMultilineValues indicates whether to allow Python-like multi-line values.
+	// Docs: https://docs.python.org/3/library/configparser.html#supported-ini-file-structure
+	// Relevant quote: Values can also span multiple lines, as long as they are indented deeper
+	// than the first line of the value.
+	AllowPythonMultilineValues bool
+	// SpaceBeforeInlineComment indicates whether to allow comment symbols (\# and \;) inside value.
+	// Docs: https://docs.python.org/2/library/configparser.html
+	// Quote: Comments may appear on their own in an otherwise empty line, or may be entered in lines holding values or section names.
+	// In the latter case, they need to be preceded by a whitespace character to be recognized as a comment.
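+	//
+	// For example, an illustrative sketch of the intended behavior when this
+	// option is enabled:
+	//
+	//	f, _ := LoadSources(LoadOptions{SpaceBeforeInlineComment: true},
+	//		[]byte("key = value;x"))
+	//
+	// keeps "value;x" whole, because the ";" is not preceded by whitespace,
+	// while "key = value ;x" would still be parsed as "value" plus a comment.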
+	SpaceBeforeInlineComment bool
+	// UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format
+	// when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value"
+	UnescapeValueDoubleQuotes bool
+	// UnescapeValueCommentSymbols indicates to unescape comment symbols (\# and \;) inside value to regular format
+	// when value is NOT surrounded by any quotes.
+	// Note: UNSTABLE, behavior might change to only unescape inside double quotes but may not be necessary at all.
+	UnescapeValueCommentSymbols bool
+	// UnparseableSections stores a list of blocks that are allowed with raw content which do not otherwise
+	// conform to key/value pairs. Specify the names of those blocks here.
+	UnparseableSections []string
+	// KeyValueDelimiters is the sequence of delimiters that are used to separate key and value. By default, it is "=:".
+	KeyValueDelimiters string
+	// PreserveSurroundedQuote indicates whether to preserve surrounded quote (single and double quotes).
+	PreserveSurroundedQuote bool
+}
+
+// LoadSources allows the caller to apply customized options for loading from data source(s).
+func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
+	sources := make([]dataSource, len(others)+1)
+	sources[0], err = parseDataSource(source)
+	if err != nil {
+		return nil, err
+	}
+	for i := range others {
+		sources[i+1], err = parseDataSource(others[i])
+		if err != nil {
+			return nil, err
+		}
+	}
+	f := newFile(sources, opts)
+	if err = f.Reload(); err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+// Load loads and parses from INI data sources.
+// Arguments can be a mix of file names (string type) and raw data in []byte.
+// It will return an error if the list contains nonexistent files.
+func Load(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{}, source, others...)
+}
+
+// LooseLoad has exactly the same functionality as the Load function
+// except it ignores nonexistent files instead of returning an error.
+func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{Loose: true}, source, others...)
+}
+
+// InsensitiveLoad has exactly the same functionality as the Load function
+// except it forces all section and key names to be lowercased.
+func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{Insensitive: true}, source, others...)
+}
+
+// ShadowLoad has exactly the same functionality as the Load function
+// except it allows having shadow keys.
+func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
+	return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
+}
diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go
new file mode 100644
index 0000000..38860ff
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/key.go
@@ -0,0 +1,753 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Key represents a key under a section.
+type Key struct {
+	s               *Section
+	Comment         string
+	name            string
+	value           string
+	isAutoIncrement bool
+	isBooleanType   bool
+
+	isShadow bool
+	shadows  []*Key
+
+	nestedValues []string
+}
+
+// newKey simply returns a key object with given values.
+func newKey(s *Section, name, val string) *Key {
+	return &Key{
+		s:     s,
+		name:  name,
+		value: val,
+	}
+}
+
+func (k *Key) addShadow(val string) error {
+	if k.isShadow {
+		return errors.New("cannot add shadow to another shadow key")
+	} else if k.isAutoIncrement || k.isBooleanType {
+		return errors.New("cannot add shadow to auto-increment or boolean key")
+	}
+
+	shadow := newKey(k.s, k.name, val)
+	shadow.isShadow = true
+	k.shadows = append(k.shadows, shadow)
+	return nil
+}
+
+// AddShadow adds a new shadow key to itself.
+func (k *Key) AddShadow(val string) error {
+	if !k.s.f.options.AllowShadows {
+		return errors.New("shadow key is not allowed")
+	}
+	return k.addShadow(val)
+}
+
+func (k *Key) addNestedValue(val string) error {
+	if k.isAutoIncrement || k.isBooleanType {
+		return errors.New("cannot add nested value to auto-increment or boolean key")
+	}
+
+	k.nestedValues = append(k.nestedValues, val)
+	return nil
+}
+
+// AddNestedValue adds a nested value to the key.
+func (k *Key) AddNestedValue(val string) error {
+	if !k.s.f.options.AllowNestedValues {
+		return errors.New("nested value is not allowed")
+	}
+	return k.addNestedValue(val)
+}
+
+// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
+type ValueMapper func(string) string
+
+// Name returns name of key.
+func (k *Key) Name() string {
+	return k.name
+}
+
+// Value returns raw value of key for performance purpose.
+func (k *Key) Value() string {
+	return k.value
+}
+
+// ValueWithShadows returns raw values of key and its shadows if any.
+func (k *Key) ValueWithShadows() []string {
+	if len(k.shadows) == 0 {
+		return []string{k.value}
+	}
+	vals := make([]string, len(k.shadows)+1)
+	vals[0] = k.value
+	for i := range k.shadows {
+		vals[i+1] = k.shadows[i].value
+	}
+	return vals
+}
+
+// NestedValues returns nested values stored in the key.
+// The returned value may be nil if no nested values are stored in the key.
+func (k *Key) NestedValues() []string {
+	return k.nestedValues
+}
+
+// transformValue takes a raw value and transforms it to its final string.
+func (k *Key) transformValue(val string) string {
+	if k.s.f.ValueMapper != nil {
+		val = k.s.f.ValueMapper(val)
+	}
+
+	// Fail fast if no indicator char is found for a recursive value
+	if !strings.Contains(val, "%") {
+		return val
+	}
+	for i := 0; i < depthValues; i++ {
+		vr := varPattern.FindString(val)
+		if len(vr) == 0 {
+			break
+		}
+
+		// Take off leading '%(' and trailing ')s'.
+		noption := vr[2 : len(vr)-2]
+
+		// Search in the same section.
+		nk, err := k.s.GetKey(noption)
+		if err != nil || k == nk {
+			// Search again in default section.
+			nk, _ = k.s.f.Section("").GetKey(noption)
+		}
+
+		// Substitute by new value and take off leading '%(' and trailing ')s'.
+		val = strings.Replace(val, vr, nk.value, -1)
+	}
+	return val
+}
+
+// String returns string representation of value.
+func (k *Key) String() string {
+	return k.transformValue(k.value)
+}
+
+// Validate accepts a validate function which can
+// return a modified result as the key value.
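+//
+// For example, an illustrative sketch (cfg is an assumed *File):
+//
+//	name := cfg.Section("").Key("name").Validate(func(in string) string {
+//		if len(in) == 0 {
+//			return "unknown"
+//		}
+//		return in
+//	})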
+func (k *Key) Validate(fn func(string) string) string { + return fn(k.String()) +} + +// parseBool returns the boolean value represented by the string. +// +// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, +// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. +// Any other value returns an error. +func parseBool(str string) (value bool, err error) { + switch str { + case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": + return true, nil + case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": + return false, nil + } + return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) +} + +// Bool returns bool type value. +func (k *Key) Bool() (bool, error) { + return parseBool(k.String()) +} + +// Float64 returns float64 type value. +func (k *Key) Float64() (float64, error) { + return strconv.ParseFloat(k.String(), 64) +} + +// Int returns int type value. +func (k *Key) Int() (int, error) { + v, err := strconv.ParseInt(k.String(), 0, 64) + return int(v), err +} + +// Int64 returns int64 type value. +func (k *Key) Int64() (int64, error) { + return strconv.ParseInt(k.String(), 0, 64) +} + +// Uint returns uint type valued. +func (k *Key) Uint() (uint, error) { + u, e := strconv.ParseUint(k.String(), 0, 64) + return uint(u), e +} + +// Uint64 returns uint64 type value. +func (k *Key) Uint64() (uint64, error) { + return strconv.ParseUint(k.String(), 0, 64) +} + +// Duration returns time.Duration type value. +func (k *Key) Duration() (time.Duration, error) { + return time.ParseDuration(k.String()) +} + +// TimeFormat parses with given format and returns time.Time type value. +func (k *Key) TimeFormat(format string) (time.Time, error) { + return time.Parse(format, k.String()) +} + +// Time parses with RFC3339 format and returns time.Time type value. +func (k *Key) Time() (time.Time, error) { + return k.TimeFormat(time.RFC3339) +} + +// MustString returns default value if key value is empty. +func (k *Key) MustString(defaultVal string) string { + val := k.String() + if len(val) == 0 { + k.value = defaultVal + return defaultVal + } + return val +} + +// MustBool always returns value without error, +// it returns false if error occurs. +func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatBool(defaultVal[0]) + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(int64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. 
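+//
+// For example, an illustrative sketch (cfg is an assumed *File); note that,
+// as with the other Must* helpers, the default is also written back to the key:
+//
+//	port := cfg.Section("server").Key("port").MustUint(8080)
+//	// if "port" was empty or invalid, the key now holds "8080"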
+func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].String() + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].Format(format) + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. 
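+//
+// For example, an illustrative sketch (cfg is an assumed *File; t1 and t2
+// stand for previously parsed time.Time candidates):
+//
+//	start := cfg.Section("job").Key("start").
+//		InTimeFormat("2006-01-02", t1, []time.Time{t1, t2})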
+func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string divided by given delimiter. +func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + runes := []rune(str) + vals := make([]string, 0, 2) + var buf bytes.Buffer + escape := false + idx := 0 + for { + if escape { + escape = false + if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) { + buf.WriteRune('\\') + } + buf.WriteRune(runes[idx]) + } else { + if runes[idx] == '\\' { + escape = true + } else if strings.HasPrefix(string(runes[idx:]), delim) { + idx += len(delim) - 1 + vals = append(vals, strings.TrimSpace(buf.String())) + buf.Reset() + } else { + buf.WriteRune(runes[idx]) + } + } + idx++ + if idx == len(runes) { + break + } + } + + if buf.Len() > 0 { + vals = append(vals, strings.TrimSpace(buf.String())) + } + + return vals +} + +// StringsWithShadows returns list of string divided by given delimiter. +// Shadows will also be appended if any. +func (k *Key) StringsWithShadows(delim string) []string { + vals := k.ValueWithShadows() + results := make([]string, 0, len(vals)*2) + for i := range vals { + if len(vals) == 0 { + continue + } + + results = append(results, strings.Split(vals[i], delim)...) + } + + for i := range results { + results[i] = k.transformValue(strings.TrimSpace(results[i])) + } + return results +} + +// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. 
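+//
+// For example, an illustrative sketch (cfg is an assumed *File), given a key
+// holding "1.5, 2.5":
+//
+//	ws := cfg.Section("").Key("weights").Float64s(",")
+//	// ws == []float64{1.5, 2.5}; entries are split on "," and trimmed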
+func (k *Key) Float64s(delim string) []float64 { + vals, _ := k.parseFloat64s(k.Strings(delim), true, false) + return vals +} + +// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Ints(delim string) []int { + vals, _ := k.parseInts(k.Strings(delim), true, false) + return vals +} + +// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Int64s(delim string) []int64 { + vals, _ := k.parseInt64s(k.Strings(delim), true, false) + return vals +} + +// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uints(delim string) []uint { + vals, _ := k.parseUints(k.Strings(delim), true, false) + return vals +} + +// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uint64s(delim string) []uint64 { + vals, _ := k.parseUint64s(k.Strings(delim), true, false) + return vals +} + +// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) TimesFormat(format, delim string) []time.Time { + vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false) + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then +// it will not be included to result list. +func (k *Key) ValidFloat64s(delim string) []float64 { + vals, _ := k.parseFloat64s(k.Strings(delim), false, false) + return vals +} + +// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will +// not be included to result list. +func (k *Key) ValidInts(delim string) []int { + vals, _ := k.parseInts(k.Strings(delim), false, false) + return vals +} + +// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, +// then it will not be included to result list. +func (k *Key) ValidInt64s(delim string) []int64 { + vals, _ := k.parseInt64s(k.Strings(delim), false, false) + return vals +} + +// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, +// then it will not be included to result list. +func (k *Key) ValidUints(delim string) []uint { + vals, _ := k.parseUints(k.Strings(delim), false, false) + return vals +} + +// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned +// integer, then it will not be included to result list. +func (k *Key) ValidUint64s(delim string) []uint64 { + vals, _ := k.parseUint64s(k.Strings(delim), false, false) + return vals +} + +// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimesFormat(format, delim string) []time.Time { + vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false) + return vals +} + +// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. 
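+//
+// For example, an illustrative sketch (k is an assumed *Key): the plain,
+// Valid* and Strict* variants differ in how invalid entries are handled.
+// Given a key holding "1, x, 3":
+//
+//	k.Ints(",")       // []int{1, 0, 3}: invalid entries become zero values
+//	k.ValidInts(",")  // []int{1, 3}:    invalid entries are dropped
+//	k.StrictInts(",") // returns an error on "x"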
+func (k *Key) ValidTimes(delim string) []time.Time { + return k.ValidTimesFormat(time.RFC3339, delim) +} + +// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictFloat64s(delim string) ([]float64, error) { + return k.parseFloat64s(k.Strings(delim), false, true) +} + +// StrictInts returns list of int divided by given delimiter or error on first invalid input. +func (k *Key) StrictInts(delim string) ([]int, error) { + return k.parseInts(k.Strings(delim), false, true) +} + +// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictInt64s(delim string) ([]int64, error) { + return k.parseInt64s(k.Strings(delim), false, true) +} + +// StrictUints returns list of uint divided by given delimiter or error on first invalid input. +func (k *Key) StrictUints(delim string) ([]uint, error) { + return k.parseUints(k.Strings(delim), false, true) +} + +// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictUint64s(delim string) ([]uint64, error) { + return k.parseUint64s(k.Strings(delim), false, true) +} + +// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { + return k.parseTimesFormat(format, k.Strings(delim), false, true) +} + +// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimes(delim string) ([]time.Time, error) { + return k.StrictTimesFormat(time.RFC3339, delim) +} + +// parseFloat64s transforms strings to float64s. +func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) { + vals := make([]float64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseFloat(str, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseInts transforms strings to ints. +func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { + vals := make([]int, 0, len(strs)) + for _, str := range strs { + valInt64, err := strconv.ParseInt(str, 0, 64) + val := int(valInt64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseInt64s transforms strings to int64s. +func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { + vals := make([]int64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseInt(str, 0, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseUints transforms strings to uints. +func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) { + vals := make([]uint, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 0, 0) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, uint(val)) + } + } + return vals, nil +} + +// parseUint64s transforms strings to uint64s. 
+func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) { + vals := make([]uint64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 0, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseTimesFormat transforms strings to times in given format. +func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { + vals := make([]time.Time, 0, len(strs)) + for _, str := range strs { + val, err := time.Parse(format, str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// SetValue changes key value. +func (k *Key) SetValue(v string) { + if k.s.f.BlockMode { + k.s.f.lock.Lock() + defer k.s.f.lock.Unlock() + } + + k.value = v + k.s.keysHash[k.name] = v +} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go new file mode 100644 index 0000000..7c22a25 --- /dev/null +++ b/vendor/github.com/go-ini/ini/parser.go @@ -0,0 +1,487 @@ +// Copyright 2015 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bufio" + "bytes" + "fmt" + "io" + "regexp" + "strconv" + "strings" + "unicode" +) + +var pythonMultiline = regexp.MustCompile("^(\\s+)([^\n]+)") + +type parserOptions struct { + IgnoreContinuation bool + IgnoreInlineComment bool + AllowPythonMultilineValues bool + SpaceBeforeInlineComment bool + UnescapeValueDoubleQuotes bool + UnescapeValueCommentSymbols bool + PreserveSurroundedQuote bool +} + +type parser struct { + buf *bufio.Reader + options parserOptions + + isEOF bool + count int + comment *bytes.Buffer +} + +func newParser(r io.Reader, opts parserOptions) *parser { + return &parser{ + buf: bufio.NewReader(r), + options: opts, + count: 1, + comment: &bytes.Buffer{}, + } +} + +// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. 
+// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding +func (p *parser) BOM() error { + mask, err := p.buf.Peek(2) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 2 { + return nil + } + + switch { + case mask[0] == 254 && mask[1] == 255: + fallthrough + case mask[0] == 255 && mask[1] == 254: + p.buf.Read(mask) + case mask[0] == 239 && mask[1] == 187: + mask, err := p.buf.Peek(3) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 3 { + return nil + } + if mask[2] == 191 { + p.buf.Read(mask) + } + } + return nil +} + +func (p *parser) readUntil(delim byte) ([]byte, error) { + data, err := p.buf.ReadBytes(delim) + if err != nil { + if err == io.EOF { + p.isEOF = true + } else { + return nil, err + } + } + return data, nil +} + +func cleanComment(in []byte) ([]byte, bool) { + i := bytes.IndexAny(in, "#;") + if i == -1 { + return nil, false + } + return in[i:], true +} + +func readKeyName(delimiters string, in []byte) (string, int, error) { + line := string(in) + + // Check if key name surrounded by quotes. + var keyQuote string + if line[0] == '"' { + if len(line) > 6 && string(line[0:3]) == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + + // Get out key name + endIdx := -1 + if len(keyQuote) > 0 { + startIdx := len(keyQuote) + // FIXME: fail case -> """"""name"""=value + pos := strings.Index(line[startIdx:], keyQuote) + if pos == -1 { + return "", -1, fmt.Errorf("missing closing key quote: %s", line) + } + pos += startIdx + + // Find key-value delimiter + i := strings.IndexAny(line[pos+startIdx:], delimiters) + if i < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + endIdx = pos + i + return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil + } + + endIdx = strings.IndexAny(line, delimiters) + if endIdx < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil +} + +func (p *parser) readMultilines(line, val, valQuote string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := string(data) + + pos := strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + + comment, has := cleanComment([]byte(next[pos:])) + if has { + p.comment.Write(bytes.TrimSpace(comment)) + } + break + } + val += next + if p.isEOF { + return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next) + } + } + return val, nil +} + +func (p *parser) readContinuationLines(val string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := strings.TrimSpace(string(data)) + + if len(next) == 0 { + break + } + val += next + if val[len(val)-1] != '\\' { + break + } + val = val[:len(val)-1] + } + return val, nil +} + +// hasSurroundedQuote check if and only if the first and last characters +// are quotes \" or \'. +// It returns false if any other parts also contain same kind of quotes. 
+func hasSurroundedQuote(in string, quote byte) bool { + return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote && + strings.IndexByte(in[1:], quote) == len(in)-2 +} + +func (p *parser) readValue(in []byte, bufferSize int) (string, error) { + + line := strings.TrimLeftFunc(string(in), unicode.IsSpace) + if len(line) == 0 { + if p.options.AllowPythonMultilineValues && len(in) > 0 && in[len(in)-1] == '\n' { + return p.readPythonMultilines(line, bufferSize) + } + return "", nil + } + + var valQuote string + if len(line) > 3 && string(line[0:3]) == `"""` { + valQuote = `"""` + } else if line[0] == '`' { + valQuote = "`" + } else if p.options.UnescapeValueDoubleQuotes && line[0] == '"' { + valQuote = `"` + } + + if len(valQuote) > 0 { + startIdx := len(valQuote) + pos := strings.LastIndex(line[startIdx:], valQuote) + // Check for multi-line value + if pos == -1 { + return p.readMultilines(line, line[startIdx:], valQuote) + } + + if p.options.UnescapeValueDoubleQuotes && valQuote == `"` { + return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil + } + return line[startIdx : pos+startIdx], nil + } + + lastChar := line[len(line)-1] + // Won't be able to reach here if value only contains whitespace + line = strings.TrimSpace(line) + trimmedLastChar := line[len(line)-1] + + // Check continuation lines when desired + if !p.options.IgnoreContinuation && trimmedLastChar == '\\' { + return p.readContinuationLines(line[:len(line)-1]) + } + + // Check if ignore inline comment + if !p.options.IgnoreInlineComment { + var i int + if p.options.SpaceBeforeInlineComment { + i = strings.Index(line, " #") + if i == -1 { + i = strings.Index(line, " ;") + } + + } else { + i = strings.IndexAny(line, "#;") + } + + if i > -1 { + p.comment.WriteString(line[i:]) + line = strings.TrimSpace(line[:i]) + } + + } + + // Trim single and double quotes + if (hasSurroundedQuote(line, '\'') || + hasSurroundedQuote(line, '"')) && !p.options.PreserveSurroundedQuote { + line = line[1 : len(line)-1] + } else if len(valQuote) == 0 && p.options.UnescapeValueCommentSymbols { + if strings.Contains(line, `\;`) { + line = strings.Replace(line, `\;`, ";", -1) + } + if strings.Contains(line, `\#`) { + line = strings.Replace(line, `\#`, "#", -1) + } + } else if p.options.AllowPythonMultilineValues && lastChar == '\n' { + return p.readPythonMultilines(line, bufferSize) + } + + return line, nil +} + +func (p *parser) readPythonMultilines(line string, bufferSize int) (string, error) { + parserBufferPeekResult, _ := p.buf.Peek(bufferSize) + peekBuffer := bytes.NewBuffer(parserBufferPeekResult) + + for { + peekData, peekErr := peekBuffer.ReadBytes('\n') + if peekErr != nil { + if peekErr == io.EOF { + return line, nil + } + return "", peekErr + } + + peekMatches := pythonMultiline.FindStringSubmatch(string(peekData)) + if len(peekMatches) != 3 { + return line, nil + } + + // NOTE: Return if not a python-ini multi-line value. + currentIdentSize := len(peekMatches[1]) + if currentIdentSize <= 0 { + return line, nil + } + + // NOTE: Just advance the parser reader (buffer) in-sync with the peek buffer. + _, err := p.readUntil('\n') + if err != nil { + return "", err + } + + line += fmt.Sprintf("\n%s", peekMatches[2]) + } +} + +// parse parses data through an io.Reader. 
+func (f *File) parse(reader io.Reader) (err error) {
+	p := newParser(reader, parserOptions{
+		IgnoreContinuation:          f.options.IgnoreContinuation,
+		IgnoreInlineComment:         f.options.IgnoreInlineComment,
+		AllowPythonMultilineValues:  f.options.AllowPythonMultilineValues,
+		SpaceBeforeInlineComment:    f.options.SpaceBeforeInlineComment,
+		UnescapeValueDoubleQuotes:   f.options.UnescapeValueDoubleQuotes,
+		UnescapeValueCommentSymbols: f.options.UnescapeValueCommentSymbols,
+		PreserveSurroundedQuote:     f.options.PreserveSurroundedQuote,
+	})
+	if err = p.BOM(); err != nil {
+		return fmt.Errorf("BOM: %v", err)
+	}
+
+	// Ignore error because default section name is never empty string.
+	name := DefaultSection
+	if f.options.Insensitive {
+		name = strings.ToLower(DefaultSection)
+	}
+	section, _ := f.NewSection(name)
+
+	// This "last" is not strictly equivalent to "previous one" if the current key is not the first nested key
+	var isLastValueEmpty bool
+	var lastRegularKey *Key
+
+	var line []byte
+	var inUnparseableSection bool
+
+	// NOTE: Iterate and increase `currentPeekSize` until
+	// the size of the parser buffer is found.
+	// TODO(unknwon): When Golang 1.10 is the lowest version supported, replace with `parserBufferSize := p.buf.Size()`.
+	parserBufferSize := 0
+	// NOTE: Peek 1kb at a time.
+	currentPeekSize := 1024
+
+	if f.options.AllowPythonMultilineValues {
+		for {
+			peekBytes, _ := p.buf.Peek(currentPeekSize)
+			peekBytesLength := len(peekBytes)
+
+			if parserBufferSize >= peekBytesLength {
+				break
+			}
+
+			currentPeekSize *= 2
+			parserBufferSize = peekBytesLength
+		}
+	}
+
+	for !p.isEOF {
+		line, err = p.readUntil('\n')
+		if err != nil {
+			return err
+		}
+
+		if f.options.AllowNestedValues &&
+			isLastValueEmpty && len(line) > 0 {
+			if line[0] == ' ' || line[0] == '\t' {
+				lastRegularKey.addNestedValue(string(bytes.TrimSpace(line)))
+				continue
+			}
+		}
+
+		line = bytes.TrimLeftFunc(line, unicode.IsSpace)
+		if len(line) == 0 {
+			continue
+		}
+
+		// Comments
+		if line[0] == '#' || line[0] == ';' {
+			// Note: we do not care about the ending line break here; it is
+			// needed for adding the second line, so just clean it once at
+			// the end when setting the value.
+			p.comment.Write(line)
+			continue
+		}
+
+		// Section
+		if line[0] == '[' {
+			// Read to the next ']' (TODO: support quoted strings)
+			closeIdx := bytes.LastIndexByte(line, ']')
+			if closeIdx == -1 {
+				return fmt.Errorf("unclosed section: %s", line)
+			}
+
+			name := string(line[1:closeIdx])
+			section, err = f.NewSection(name)
+			if err != nil {
+				return err
+			}
+
+			comment, has := cleanComment(line[closeIdx+1:])
+			if has {
+				p.comment.Write(comment)
+			}
+
+			section.Comment = strings.TrimSpace(p.comment.String())
+
+			// Reset auto-counter and comments
+			p.comment.Reset()
+			p.count = 1
+
+			inUnparseableSection = false
+			for i := range f.options.UnparseableSections {
+				if f.options.UnparseableSections[i] == name ||
+					(f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) {
+					inUnparseableSection = true
+					continue
+				}
+			}
+			continue
+		}
+
+		if inUnparseableSection {
+			section.isRawSection = true
+			section.rawBody += string(line)
+			continue
+		}
+
+		kname, offset, err := readKeyName(f.options.KeyValueDelimiters, line)
+		if err != nil {
+			// Treat as boolean key when desired, and whole line is key name.
+ if IsErrDelimiterNotFound(err) { + switch { + case f.options.AllowBooleanKeys: + kname, err := p.readValue(line, parserBufferSize) + if err != nil { + return err + } + key, err := section.NewBooleanKey(kname) + if err != nil { + return err + } + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + continue + + case f.options.SkipUnrecognizableLines: + continue + } + } + return err + } + + // Auto increment. + isAutoIncr := false + if kname == "-" { + isAutoIncr = true + kname = "#" + strconv.Itoa(p.count) + p.count++ + } + + value, err := p.readValue(line[offset:], parserBufferSize) + if err != nil { + return err + } + isLastValueEmpty = len(value) == 0 + + key, err := section.NewKey(kname, value) + if err != nil { + return err + } + key.isAutoIncrement = isAutoIncr + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + lastRegularKey = key + } + return nil +} diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go new file mode 100644 index 0000000..0bd3e13 --- /dev/null +++ b/vendor/github.com/go-ini/ini/section.go @@ -0,0 +1,256 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "errors" + "fmt" + "strings" +) + +// Section represents a config section. +type Section struct { + f *File + Comment string + name string + keys map[string]*Key + keyList []string + keysHash map[string]string + + isRawSection bool + rawBody string +} + +func newSection(f *File, name string) *Section { + return &Section{ + f: f, + name: name, + keys: make(map[string]*Key), + keyList: make([]string, 0, 10), + keysHash: make(map[string]string), + } +} + +// Name returns name of Section. +func (s *Section) Name() string { + return s.name +} + +// Body returns rawBody of Section if the section was marked as unparseable. +// It still follows the other rules of the INI format surrounding leading/trailing whitespace. +func (s *Section) Body() string { + return strings.TrimSpace(s.rawBody) +} + +// SetBody updates body content only if section is raw. +func (s *Section) SetBody(body string) { + if !s.isRawSection { + return + } + s.rawBody = body +} + +// NewKey creates a new key to given section. +func (s *Section) NewKey(name, val string) (*Key, error) { + if len(name) == 0 { + return nil, errors.New("error creating new key: empty key name") + } else if s.f.options.Insensitive { + name = strings.ToLower(name) + } + + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + if inSlice(name, s.keyList) { + if s.f.options.AllowShadows { + if err := s.keys[name].addShadow(val); err != nil { + return nil, err + } + } else { + s.keys[name].value = val + s.keysHash[name] = val + } + return s.keys[name], nil + } + + s.keyList = append(s.keyList, name) + s.keys[name] = newKey(s, name, val) + s.keysHash[name] = val + return s.keys[name], nil +} + +// NewBooleanKey creates a new boolean type key to given section. 
+func (s *Section) NewBooleanKey(name string) (*Key, error) { + key, err := s.NewKey(name, "true") + if err != nil { + return nil, err + } + + key.isBooleanType = true + return key, nil +} + +// GetKey returns key in section by given name. +func (s *Section) GetKey(name string) (*Key, error) { + if s.f.BlockMode { + s.f.lock.RLock() + } + if s.f.options.Insensitive { + name = strings.ToLower(name) + } + key := s.keys[name] + if s.f.BlockMode { + s.f.lock.RUnlock() + } + + if key == nil { + // Check if it is a child-section. + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + return sec.GetKey(name) + } + break + } + return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) + } + return key, nil +} + +// HasKey returns true if section contains a key with given name. +func (s *Section) HasKey(name string) bool { + key, _ := s.GetKey(name) + return key != nil +} + +// Deprecated: Use "HasKey" instead. +func (s *Section) Haskey(name string) bool { + return s.HasKey(name) +} + +// HasValue returns true if section contains given raw value. +func (s *Section) HasValue(value string) bool { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + for _, k := range s.keys { + if value == k.value { + return true + } + } + return false +} + +// Key assumes named Key exists in section and returns a zero-value when not. +func (s *Section) Key(name string) *Key { + key, err := s.GetKey(name) + if err != nil { + // It's OK here because the only possible error is empty key name, + // but if it's empty, this piece of code won't be executed. + key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. +func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// ParentKeys returns list of keys of parent section. +func (s *Section) ParentKeys() []*Key { + var parentKeys []*Key + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + parentKeys = append(parentKeys, sec.Keys()...) + } else { + break + } + + } + return parentKeys +} + +// KeyStrings returns list of key names of section. +func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list +} + +// KeysHash returns keys hash consisting of names and values. +func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := map[string]string{} + for key, value := range s.keysHash { + hash[key] = value + } + return hash +} + +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + delete(s.keysHash, name) + return + } + } +} + +// ChildSections returns a list of child sections of current section. +// For example, "[parent.child1]" and "[parent.child12]" are child sections +// of section "[parent]". +func (s *Section) ChildSections() []*Section { + prefix := s.name + "." 
+ children := make([]*Section, 0, 3) + for _, name := range s.f.sectionList { + if strings.HasPrefix(name, prefix) { + children = append(children, s.f.sections[name]) + } + } + return children +} diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go new file mode 100644 index 0000000..c713f82 --- /dev/null +++ b/vendor/github.com/go-ini/ini/struct.go @@ -0,0 +1,563 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "strings" + "time" + "unicode" +) + +// NameMapper represents a ini tag name mapper. +type NameMapper func(string) string + +// Built-in name getters. +var ( + // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. + AllCapsUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + } + newstr = append(newstr, unicode.ToUpper(chr)) + } + return string(newstr) + } + // TitleUnderscore converts to format title_underscore. + TitleUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + chr -= ('A' - 'a') + } + newstr = append(newstr, chr) + } + return string(newstr) + } +) + +func (s *Section) parseFieldName(raw, actual string) string { + if len(actual) > 0 { + return actual + } + if s.f.NameMapper != nil { + return s.f.NameMapper(raw) + } + return raw +} + +func parseDelim(actual string) string { + if len(actual) > 0 { + return actual + } + return "," +} + +var reflectTime = reflect.TypeOf(time.Now()).Kind() + +// setSliceWithProperType sets proper values to slice based on its type. 
+func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { + var strs []string + if allowShadow { + strs = key.StringsWithShadows(delim) + } else { + strs = key.Strings(delim) + } + + numVals := len(strs) + if numVals == 0 { + return nil + } + + var vals interface{} + var err error + + sliceOf := field.Type().Elem().Kind() + switch sliceOf { + case reflect.String: + vals = strs + case reflect.Int: + vals, err = key.parseInts(strs, true, false) + case reflect.Int64: + vals, err = key.parseInt64s(strs, true, false) + case reflect.Uint: + vals, err = key.parseUints(strs, true, false) + case reflect.Uint64: + vals, err = key.parseUint64s(strs, true, false) + case reflect.Float64: + vals, err = key.parseFloat64s(strs, true, false) + case reflectTime: + vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + if err != nil && isStrict { + return err + } + + slice := reflect.MakeSlice(field.Type(), numVals, numVals) + for i := 0; i < numVals; i++ { + switch sliceOf { + case reflect.String: + slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i])) + case reflect.Int: + slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i])) + case reflect.Int64: + slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i])) + case reflect.Uint: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i])) + case reflect.Uint64: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i])) + case reflect.Float64: + slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i])) + case reflectTime: + slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i])) + } + } + field.Set(slice) + return nil +} + +func wrapStrictError(err error, isStrict bool) error { + if isStrict { + return err + } + return nil +} + +// setWithProperType sets proper value to field based on its type, +// but it does not return error for failing parsing, +// because we want to use default value that is already assigned to struct. 
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { + switch t.Kind() { + case reflect.String: + if len(key.String()) == 0 { + return nil + } + field.SetString(key.String()) + case reflect.Bool: + boolVal, err := key.Bool() + if err != nil { + return wrapStrictError(err, isStrict) + } + field.SetBool(boolVal) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + durationVal, err := key.Duration() + // Skip zero value + if err == nil && int64(durationVal) > 0 { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + intVal, err := key.Int64() + if err != nil { + return wrapStrictError(err, isStrict) + } + field.SetInt(intVal) + // byte is an alias for uint8, so supporting uint8 breaks support for byte + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + durationVal, err := key.Duration() + // Skip zero value + if err == nil && uint64(durationVal) > 0 { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + uintVal, err := key.Uint64() + if err != nil { + return wrapStrictError(err, isStrict) + } + field.SetUint(uintVal) + + case reflect.Float32, reflect.Float64: + floatVal, err := key.Float64() + if err != nil { + return wrapStrictError(err, isStrict) + } + field.SetFloat(floatVal) + case reflectTime: + timeVal, err := key.Time() + if err != nil { + return wrapStrictError(err, isStrict) + } + field.Set(reflect.ValueOf(timeVal)) + case reflect.Slice: + return setSliceWithProperType(key, field, delim, allowShadow, isStrict) + case reflect.Ptr: + switch t.Elem().Kind() { + case reflect.Bool: + boolVal, err := key.Bool() + if err != nil { + return wrapStrictError(err, isStrict) + } + field.Set(reflect.ValueOf(&boolVal)) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) { + opts := strings.SplitN(tag, ",", 3) + rawName = opts[0] + if len(opts) > 1 { + omitEmpty = opts[1] == "omitempty" + } + if len(opts) > 2 { + allowShadow = opts[2] == "allowshadow" + } + return rawName, omitEmpty, allowShadow +} + +func (s *Section) mapTo(val reflect.Value, isStrict bool) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + rawName, _, allowShadow := parseTagOptions(tag) + fieldName := s.parseFieldName(tpField.Name, rawName) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + isStruct := tpField.Type.Kind() == reflect.Struct + isStructPtr := tpField.Type.Kind() == reflect.Ptr && tpField.Type.Elem().Kind() == reflect.Struct + isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous + if isAnonymous { + field.Set(reflect.New(tpField.Type.Elem())) + } + + if isAnonymous || isStruct || isStructPtr { + if sec, err := s.f.GetSection(fieldName); err == nil { + // Only set the field to non-nil struct value if we have + // a section for it. Otherwise, we end up with a non-nil + // struct ptr even though there is no data. 
+ if isStructPtr && field.IsNil() { + field.Set(reflect.New(tpField.Type.Elem())) + } + if err = sec.mapTo(field, isStrict); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + continue + } + } + + if key, err := s.GetKey(fieldName); err == nil { + delim := parseDelim(tpField.Tag.Get("delim")) + if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + } + } + return nil +} + +// MapTo maps section to given struct. +func (s *Section) MapTo(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot map to non-pointer struct") + } + + return s.mapTo(val, false) +} + +// StrictMapTo maps section to given struct in strict mode, +// which returns all possible error including value parsing error. +func (s *Section) StrictMapTo(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot map to non-pointer struct") + } + + return s.mapTo(val, true) +} + +// MapTo maps file to given struct. +func (f *File) MapTo(v interface{}) error { + return f.Section("").MapTo(v) +} + +// StrictMapTo maps file to given struct in strict mode, +// which returns all possible error including value parsing error. +func (f *File) StrictMapTo(v interface{}) error { + return f.Section("").StrictMapTo(v) +} + +// MapToWithMapper maps data sources to given struct with name mapper. +func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) + if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.MapTo(v) +} + +// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode, +// which returns all possible error including value parsing error. +func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) + if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.StrictMapTo(v) +} + +// MapTo maps data sources to given struct. +func MapTo(v, source interface{}, others ...interface{}) error { + return MapToWithMapper(v, nil, source, others...) +} + +// StrictMapTo maps data sources to given struct in strict mode, +// which returns all possible error including value parsing error. +func StrictMapTo(v, source interface{}, others ...interface{}) error { + return StrictMapToWithMapper(v, nil, source, others...) +} + +// reflectSliceWithProperType does the opposite thing as setSliceWithProperType. 
+func reflectSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error { + slice := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + sliceOf := field.Type().Elem().Kind() + + if allowShadow { + var keyWithShadows *Key + for i := 0; i < field.Len(); i++ { + var val string + switch sliceOf { + case reflect.String: + val = slice.Index(i).String() + case reflect.Int, reflect.Int64: + val = fmt.Sprint(slice.Index(i).Int()) + case reflect.Uint, reflect.Uint64: + val = fmt.Sprint(slice.Index(i).Uint()) + case reflect.Float64: + val = fmt.Sprint(slice.Index(i).Float()) + case reflectTime: + val = slice.Index(i).Interface().(time.Time).Format(time.RFC3339) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + + if i == 0 { + keyWithShadows = newKey(key.s, key.name, val) + } else { + keyWithShadows.AddShadow(val) + } + } + key = keyWithShadows + return nil + } + + var buf bytes.Buffer + for i := 0; i < field.Len(); i++ { + switch sliceOf { + case reflect.String: + buf.WriteString(slice.Index(i).String()) + case reflect.Int, reflect.Int64: + buf.WriteString(fmt.Sprint(slice.Index(i).Int())) + case reflect.Uint, reflect.Uint64: + buf.WriteString(fmt.Sprint(slice.Index(i).Uint())) + case reflect.Float64: + buf.WriteString(fmt.Sprint(slice.Index(i).Float())) + case reflectTime: + buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339)) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + buf.WriteString(delim) + } + key.SetValue(buf.String()[:buf.Len()-len(delim)]) + return nil +} + +// reflectWithProperType does the opposite thing as setWithProperType. +func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error { + switch t.Kind() { + case reflect.String: + key.SetValue(field.String()) + case reflect.Bool: + key.SetValue(fmt.Sprint(field.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + key.SetValue(fmt.Sprint(field.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + key.SetValue(fmt.Sprint(field.Uint())) + case reflect.Float32, reflect.Float64: + key.SetValue(fmt.Sprint(field.Float())) + case reflectTime: + key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339))) + case reflect.Slice: + return reflectSliceWithProperType(key, field, delim, allowShadow) + case reflect.Ptr: + if !field.IsNil() { + return reflectWithProperType(t.Elem(), key, field.Elem(), delim, allowShadow) + } + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +// CR: copied from encoding/json/encode.go with modifications of time.Time support. +// TODO: add more test coverage. 
+func isEmptyValue(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	case reflectTime:
+		t, ok := v.Interface().(time.Time)
+		return ok && t.IsZero()
+	}
+	return false
+}
+
+func (s *Section) reflectFrom(val reflect.Value) error {
+	if val.Kind() == reflect.Ptr {
+		val = val.Elem()
+	}
+	typ := val.Type()
+
+	for i := 0; i < typ.NumField(); i++ {
+		field := val.Field(i)
+		tpField := typ.Field(i)
+
+		tag := tpField.Tag.Get("ini")
+		if tag == "-" {
+			continue
+		}
+
+		rawName, omitEmpty, allowShadow := parseTagOptions(tag)
+		if omitEmpty && isEmptyValue(field) {
+			continue
+		}
+
+		fieldName := s.parseFieldName(tpField.Name, rawName)
+		if len(fieldName) == 0 || !field.CanSet() {
+			continue
+		}
+
+		if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
+			(tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
+			// Note: The only possible error here is that the section doesn't exist.
+			sec, err := s.f.GetSection(fieldName)
+			if err != nil {
+				// Note: fieldName can never be empty here, ignore error.
+				sec, _ = s.f.NewSection(fieldName)
+			}
+
+			// Add comment from comment tag
+			if len(sec.Comment) == 0 {
+				sec.Comment = tpField.Tag.Get("comment")
+			}
+
+			if err = sec.reflectFrom(field); err != nil {
+				return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
+			}
+			continue
+		}
+
+		// Note: Same reason as the section above.
+		key, err := s.GetKey(fieldName)
+		if err != nil {
+			key, _ = s.NewKey(fieldName, "")
+		}
+
+		// Add comment from comment tag
+		if len(key.Comment) == 0 {
+			key.Comment = tpField.Tag.Get("comment")
+		}
+
+		if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim")), allowShadow); err != nil {
+			return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
+		}
+
+	}
+	return nil
+}
+
+// ReflectFrom reflects section from given struct.
+func (s *Section) ReflectFrom(v interface{}) error {
+	typ := reflect.TypeOf(v)
+	val := reflect.ValueOf(v)
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		val = val.Elem()
+	} else {
+		return errors.New("cannot reflect from non-pointer struct")
+	}
+
+	return s.reflectFrom(val)
+}
+
+// ReflectFrom reflects file from given struct.
+func (f *File) ReflectFrom(v interface{}) error {
+	return f.Section("").ReflectFrom(v)
+}
+
+// ReflectFromWithMapper reflects data sources from given struct with name mapper.
+func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
+	cfg.NameMapper = mapper
+	return cfg.ReflectFrom(v)
+}
+
+// ReflectFrom reflects data sources from given struct.
+func ReflectFrom(cfg *File, v interface{}) error { + return ReflectFromWithMapper(cfg, v, nil) +} diff --git a/vendor/github.com/go-playground/locales/.gitignore b/vendor/github.com/go-playground/locales/.gitignore new file mode 100644 index 0000000..daf913b --- /dev/null +++ b/vendor/github.com/go-playground/locales/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/go-playground/locales/LICENSE b/vendor/github.com/go-playground/locales/LICENSE new file mode 100644 index 0000000..75854ac --- /dev/null +++ b/vendor/github.com/go-playground/locales/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Go Playground + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/go-playground/locales/README.md b/vendor/github.com/go-playground/locales/README.md new file mode 100644 index 0000000..43329f8 --- /dev/null +++ b/vendor/github.com/go-playground/locales/README.md @@ -0,0 +1,172 @@ +## locales +![Project status](https://img.shields.io/badge/version-0.12.1-green.svg) +[![Build Status](https://semaphoreci.com/api/v1/joeybloggs/locales/branches/master/badge.svg)](https://semaphoreci.com/joeybloggs/locales) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/locales)](https://goreportcard.com/report/github.com/go-playground/locales) +[![GoDoc](https://godoc.org/github.com/go-playground/locales?status.svg)](https://godoc.org/github.com/go-playground/locales) +![License](https://img.shields.io/dub/l/vibe-d.svg) +[![Gitter](https://badges.gitter.im/go-playground/locales.svg)](https://gitter.im/go-playground/locales?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) + +Locales is a set of locales generated from the [Unicode CLDR Project](http://cldr.unicode.org/) which can be used independently or within +an i18n package; these were built for use with, but not exclusive to, [Universal Translator](https://github.com/go-playground/universal-translator). 
+ +Features +-------- +- [x] Rules generated from the latest [CLDR](http://cldr.unicode.org/index/downloads) data, v31.0.1 +- [x] Contains Cardinal, Ordinal and Range Plural Rules +- [x] Contains Month, Weekday and Timezone translations built in +- [x] Contains Date & Time formatting functions +- [x] Contains Number, Currency, Accounting and Percent formatting functions +- [x] Supports the "Gregorian" calendar only ( my time isn't unlimited, had to draw the line somewhere ) + +Full Tests +-------------------- +I could sure use your help adding tests for every locale, it is a huge undertaking and I just don't have the free time to do it all at the moment; +any help would be **greatly appreciated!!!!** please see [issue](https://github.com/go-playground/locales/issues/1) for details. + +Installation +----------- + +Use go get + +```shell +go get github.com/go-playground/locales +``` + +NOTES +-------- +You'll notice most return types are []byte, this is because most of the time the results will be concatenated with a larger body +of text and can avoid some allocations if already appending to a byte array, otherwise just cast as string. + +Usage +------- +```go +package main + +import ( + "fmt" + "time" + + "github.com/go-playground/locales/currency" + "github.com/go-playground/locales/en_CA" +) + +func main() { + + loc, _ := time.LoadLocation("America/Toronto") + datetime := time.Date(2016, 02, 03, 9, 0, 1, 0, loc) + + l := en_CA.New() + + // Dates + fmt.Println(l.FmtDateFull(datetime)) + fmt.Println(l.FmtDateLong(datetime)) + fmt.Println(l.FmtDateMedium(datetime)) + fmt.Println(l.FmtDateShort(datetime)) + + // Times + fmt.Println(l.FmtTimeFull(datetime)) + fmt.Println(l.FmtTimeLong(datetime)) + fmt.Println(l.FmtTimeMedium(datetime)) + fmt.Println(l.FmtTimeShort(datetime)) + + // Months Wide + fmt.Println(l.MonthWide(time.January)) + fmt.Println(l.MonthWide(time.February)) + fmt.Println(l.MonthWide(time.March)) + // ... + + // Months Abbreviated + fmt.Println(l.MonthAbbreviated(time.January)) + fmt.Println(l.MonthAbbreviated(time.February)) + fmt.Println(l.MonthAbbreviated(time.March)) + // ... + + // Months Narrow + fmt.Println(l.MonthNarrow(time.January)) + fmt.Println(l.MonthNarrow(time.February)) + fmt.Println(l.MonthNarrow(time.March)) + // ... + + // Weekdays Wide + fmt.Println(l.WeekdayWide(time.Sunday)) + fmt.Println(l.WeekdayWide(time.Monday)) + fmt.Println(l.WeekdayWide(time.Tuesday)) + // ... + + // Weekdays Abbreviated + fmt.Println(l.WeekdayAbbreviated(time.Sunday)) + fmt.Println(l.WeekdayAbbreviated(time.Monday)) + fmt.Println(l.WeekdayAbbreviated(time.Tuesday)) + // ... + + // Weekdays Short + fmt.Println(l.WeekdayShort(time.Sunday)) + fmt.Println(l.WeekdayShort(time.Monday)) + fmt.Println(l.WeekdayShort(time.Tuesday)) + // ... + + // Weekdays Narrow + fmt.Println(l.WeekdayNarrow(time.Sunday)) + fmt.Println(l.WeekdayNarrow(time.Monday)) + fmt.Println(l.WeekdayNarrow(time.Tuesday)) + // ... 
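+
+	// The same Translator also formats numbers, currency and percentages,
+	// and exposes the locale's CLDR plural rules, shown below.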
+
+	var f64 float64
+
+	f64 = -10356.4523
+
+	// Number
+	fmt.Println(l.FmtNumber(f64, 2))
+
+	// Currency
+	fmt.Println(l.FmtCurrency(f64, 2, currency.CAD))
+	fmt.Println(l.FmtCurrency(f64, 2, currency.USD))
+
+	// Accounting
+	fmt.Println(l.FmtAccounting(f64, 2, currency.CAD))
+	fmt.Println(l.FmtAccounting(f64, 2, currency.USD))
+
+	f64 = 78.12
+
+	// Percent
+	fmt.Println(l.FmtPercent(f64, 0))
+
+	// Plural Rules for locale, so you know what rules you must cover
+	fmt.Println(l.PluralsCardinal())
+	fmt.Println(l.PluralsOrdinal())
+
+	// Cardinal Plural Rules
+	fmt.Println(l.CardinalPluralRule(1, 0))
+	fmt.Println(l.CardinalPluralRule(1.0, 0))
+	fmt.Println(l.CardinalPluralRule(1.0, 1))
+	fmt.Println(l.CardinalPluralRule(3, 0))
+
+	// Ordinal Plural Rules
+	fmt.Println(l.OrdinalPluralRule(21, 0)) // 21st
+	fmt.Println(l.OrdinalPluralRule(22, 0)) // 22nd
+	fmt.Println(l.OrdinalPluralRule(33, 0)) // 33rd
+	fmt.Println(l.OrdinalPluralRule(34, 0)) // 34th
+
+	// Range Plural Rules
+	fmt.Println(l.RangePluralRule(1, 0, 1, 0)) // 1-1
+	fmt.Println(l.RangePluralRule(1, 0, 2, 0)) // 1-2
+	fmt.Println(l.RangePluralRule(5, 0, 8, 0)) // 5-8
+}
+```
+
+NOTES:
+-------
+These rules were generated from the [Unicode CLDR Project](http://cldr.unicode.org/); if you encounter any issues,
+I strongly encourage contributing to the CLDR project to get the locale information corrected, and the next time
+these locales are regenerated the fix will come with them.
+
+I do however realize that time constraints are often important and so there are two options:
+
+1. Create your own locale: copy, paste and modify, and ensure it complies with the `Translator` interface.
+2. Add an exception in the locale generation code directly, and once regenerated the fix will be in place.
+
+Please do not make fixes inside the locale files; they WILL get overwritten when the locales are regenerated.
+
+License
+------
+Distributed under the MIT License; please see the license file in the code for more details.
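The `CardinalPluralRule` and `FmtNumber` calls from the usage section above compose naturally into message selection. Here is a minimal sketch of that pattern; the `pickMessage` helper is our own illustration, not part of the library, and it assumes the `en` locale package is vendored alongside `en_CA`:

```go
package main

import (
	"fmt"

	"github.com/go-playground/locales"
	"github.com/go-playground/locales/en"
)

// pickMessage chooses a message variant based on the cardinal plural
// rule the locale reports for n (with v visible fraction digits).
func pickMessage(l locales.Translator, n float64, v uint64) string {
	switch l.CardinalPluralRule(n, v) {
	case locales.PluralRuleOne:
		return l.FmtNumber(n, v) + " file"
	default: // English cardinals only use "one" and "other"
		return l.FmtNumber(n, v) + " files"
	}
}

func main() {
	l := en.New()
	fmt.Println(pickMessage(l, 1, 0)) // 1 file
	fmt.Println(pickMessage(l, 3, 0)) // 3 files
}
```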
diff --git a/vendor/github.com/go-playground/locales/currency/currency.go b/vendor/github.com/go-playground/locales/currency/currency.go new file mode 100644 index 0000000..cdaba59 --- /dev/null +++ b/vendor/github.com/go-playground/locales/currency/currency.go @@ -0,0 +1,308 @@ +package currency + +// Type is the currency type associated with the locales currency enum +type Type int + +// locale currencies +const ( + ADP Type = iota + AED + AFA + AFN + ALK + ALL + AMD + ANG + AOA + AOK + AON + AOR + ARA + ARL + ARM + ARP + ARS + ATS + AUD + AWG + AZM + AZN + BAD + BAM + BAN + BBD + BDT + BEC + BEF + BEL + BGL + BGM + BGN + BGO + BHD + BIF + BMD + BND + BOB + BOL + BOP + BOV + BRB + BRC + BRE + BRL + BRN + BRR + BRZ + BSD + BTN + BUK + BWP + BYB + BYN + BYR + BZD + CAD + CDF + CHE + CHF + CHW + CLE + CLF + CLP + CNH + CNX + CNY + COP + COU + CRC + CSD + CSK + CUC + CUP + CVE + CYP + CZK + DDM + DEM + DJF + DKK + DOP + DZD + ECS + ECV + EEK + EGP + ERN + ESA + ESB + ESP + ETB + EUR + FIM + FJD + FKP + FRF + GBP + GEK + GEL + GHC + GHS + GIP + GMD + GNF + GNS + GQE + GRD + GTQ + GWE + GWP + GYD + HKD + HNL + HRD + HRK + HTG + HUF + IDR + IEP + ILP + ILR + ILS + INR + IQD + IRR + ISJ + ISK + ITL + JMD + JOD + JPY + KES + KGS + KHR + KMF + KPW + KRH + KRO + KRW + KWD + KYD + KZT + LAK + LBP + LKR + LRD + LSL + LTL + LTT + LUC + LUF + LUL + LVL + LVR + LYD + MAD + MAF + MCF + MDC + MDL + MGA + MGF + MKD + MKN + MLF + MMK + MNT + MOP + MRO + MTL + MTP + MUR + MVP + MVR + MWK + MXN + MXP + MXV + MYR + MZE + MZM + MZN + NAD + NGN + NIC + NIO + NLG + NOK + NPR + NZD + OMR + PAB + PEI + PEN + PES + PGK + PHP + PKR + PLN + PLZ + PTE + PYG + QAR + RHD + ROL + RON + RSD + RUB + RUR + RWF + SAR + SBD + SCR + SDD + SDG + SDP + SEK + SGD + SHP + SIT + SKK + SLL + SOS + SRD + SRG + SSP + STD + STN + SUR + SVC + SYP + SZL + THB + TJR + TJS + TMM + TMT + TND + TOP + TPE + TRL + TRY + TTD + TWD + TZS + UAH + UAK + UGS + UGX + USD + USN + USS + UYI + UYP + UYU + UZS + VEB + VEF + VND + VNN + VUV + WST + XAF + XAG + XAU + XBA + XBB + XBC + XBD + XCD + XDR + XEU + XFO + XFU + XOF + XPD + XPF + XPT + XRE + XSU + XTS + XUA + XXX + YDD + YER + YUD + YUM + YUN + YUR + ZAL + ZAR + ZMK + ZMW + ZRN + ZRZ + ZWD + ZWL + ZWR +) diff --git a/vendor/github.com/go-playground/locales/logo.png b/vendor/github.com/go-playground/locales/logo.png new file mode 100644 index 0000000..3038276 Binary files /dev/null and b/vendor/github.com/go-playground/locales/logo.png differ diff --git a/vendor/github.com/go-playground/locales/rules.go b/vendor/github.com/go-playground/locales/rules.go new file mode 100644 index 0000000..9202900 --- /dev/null +++ b/vendor/github.com/go-playground/locales/rules.go @@ -0,0 +1,293 @@ +package locales + +import ( + "strconv" + "time" + + "github.com/go-playground/locales/currency" +) + +// // ErrBadNumberValue is returned when the number passed for +// // plural rule determination cannot be parsed +// type ErrBadNumberValue struct { +// NumberValue string +// InnerError error +// } + +// // Error returns ErrBadNumberValue error string +// func (e *ErrBadNumberValue) Error() string { +// return fmt.Sprintf("Invalid Number Value '%s' %s", e.NumberValue, e.InnerError) +// } + +// var _ error = new(ErrBadNumberValue) + +// PluralRule denotes the type of plural rules +type PluralRule int + +// PluralRule's +const ( + PluralRuleUnknown PluralRule = iota + PluralRuleZero // zero + PluralRuleOne // one - singular + PluralRuleTwo // two - dual + PluralRuleFew // few - paucal + PluralRuleMany // many - also used 
for fractions if they have a separate class
+	PluralRuleOther // other - required—general plural form—also used if the language only has a single form
+)
+
+const (
+	pluralsString = "UnknownZeroOneTwoFewManyOther"
+)
+
+// Translator encapsulates an instance of a locale
+// NOTE: some values are returned as a []byte just in case the caller
+// wishes to add more and can help avoid allocations; otherwise just cast as string
+type Translator interface {
+
+	// The following Functions are for overriding, debugging or developing
+	// with a Translator Locale
+
+	// Locale returns the string value of the translator
+	Locale() string
+
+	// returns an array of cardinal plural rules associated
+	// with this translator
+	PluralsCardinal() []PluralRule
+
+	// returns an array of ordinal plural rules associated
+	// with this translator
+	PluralsOrdinal() []PluralRule
+
+	// returns an array of range plural rules associated
+	// with this translator
+	PluralsRange() []PluralRule
+
+	// returns the cardinal PluralRule given 'num' and digits/precision of 'v' for locale
+	CardinalPluralRule(num float64, v uint64) PluralRule
+
+	// returns the ordinal PluralRule given 'num' and digits/precision of 'v' for locale
+	OrdinalPluralRule(num float64, v uint64) PluralRule
+
+	// returns the ordinal PluralRule given 'num1', 'num2' and digits/precision of 'v1' and 'v2' for locale
+	RangePluralRule(num1 float64, v1 uint64, num2 float64, v2 uint64) PluralRule
+
+	// returns the locales abbreviated month given the 'month' provided
+	MonthAbbreviated(month time.Month) string
+
+	// returns the locales abbreviated months
+	MonthsAbbreviated() []string
+
+	// returns the locales narrow month given the 'month' provided
+	MonthNarrow(month time.Month) string
+
+	// returns the locales narrow months
+	MonthsNarrow() []string
+
+	// returns the locales wide month given the 'month' provided
+	MonthWide(month time.Month) string
+
+	// returns the locales wide months
+	MonthsWide() []string
+
+	// returns the locales abbreviated weekday given the 'weekday' provided
+	WeekdayAbbreviated(weekday time.Weekday) string
+
+	// returns the locales abbreviated weekdays
+	WeekdaysAbbreviated() []string
+
+	// returns the locales narrow weekday given the 'weekday' provided
+	WeekdayNarrow(weekday time.Weekday) string
+
+	// WeekdaysNarrow returns the locales narrow weekdays
+	WeekdaysNarrow() []string
+
+	// returns the locales short weekday given the 'weekday' provided
+	WeekdayShort(weekday time.Weekday) string
+
+	// returns the locales short weekdays
+	WeekdaysShort() []string
+
+	// returns the locales wide weekday given the 'weekday' provided
+	WeekdayWide(weekday time.Weekday) string
+
+	// returns the locales wide weekdays
+	WeekdaysWide() []string
+
+	// The following Functions are common formatting functions for the Translator's Locale
+
+	// returns 'num' with digits/precision of 'v' for locale and handles both Whole and Real numbers based on 'v'
+	FmtNumber(num float64, v uint64) string
+
+	// returns 'num' with digits/precision of 'v' for locale and handles both Whole and Real numbers based on 'v'
+	// NOTE: 'num' passed into FmtPercent is assumed to be in percent already
+	FmtPercent(num float64, v uint64) string
+
+	// returns the currency representation of 'num' with digits/precision of 'v' for locale
+	FmtCurrency(num float64, v uint64, currency currency.Type) string
+
+	// returns the currency representation of 'num' with digits/precision of 'v' for locale
+	// in accounting notation.
+	FmtAccounting(num float64, v uint64, currency currency.Type) string
+
+	// returns the short date representation of 't' for locale
+	FmtDateShort(t time.Time) string
+
+	// returns the medium date representation of 't' for locale
+	FmtDateMedium(t time.Time) string
+
+	// returns the long date representation of 't' for locale
+	FmtDateLong(t time.Time) string
+
+	// returns the full date representation of 't' for locale
+	FmtDateFull(t time.Time) string
+
+	// returns the short time representation of 't' for locale
+	FmtTimeShort(t time.Time) string
+
+	// returns the medium time representation of 't' for locale
+	FmtTimeMedium(t time.Time) string
+
+	// returns the long time representation of 't' for locale
+	FmtTimeLong(t time.Time) string
+
+	// returns the full time representation of 't' for locale
+	FmtTimeFull(t time.Time) string
+}
+
+// String returns the string value of PluralRule
+func (p PluralRule) String() string {
+
+	switch p {
+	case PluralRuleZero:
+		return pluralsString[7:11]
+	case PluralRuleOne:
+		return pluralsString[11:14]
+	case PluralRuleTwo:
+		return pluralsString[14:17]
+	case PluralRuleFew:
+		return pluralsString[17:20]
+	case PluralRuleMany:
+		return pluralsString[20:24]
+	case PluralRuleOther:
+		return pluralsString[24:]
+	default:
+		return pluralsString[:7]
+	}
+}
+
+//
+// Precision Notes:
+//
+// must specify a precision >= 0, and here is why https://play.golang.org/p/LyL90U0Vyh
+//
+// v := float64(3.141)
+// i := float64(int64(v))
+//
+// fmt.Println(v - i)
+//
+// or
+//
+// s := strconv.FormatFloat(v-i, 'f', -1, 64)
+// fmt.Println(s)
+//
+// these will not print what you'd expect: 0.14100000000000001
+// and so this library requires a precision to be specified, or
+// inaccurate plural rules could be applied.
+//
+//
+//
+// n - absolute value of the source number (integer and decimals).
+// i - integer digits of n.
+// v - number of visible fraction digits in n, with trailing zeros.
+// w - number of visible fraction digits in n, without trailing zeros.
+// f - visible fractional digits in n, with trailing zeros.
+// t - visible fractional digits in n, without trailing zeros.
+//
+//
+// Func(num float64, v uint64) // v = digits/precision and prevents -1 as a special case as this can lead to very unexpected behaviour, see precision notes above.
+//
+// n := math.Abs(num)
+// i := int64(n)
+// v := v
+//
+//
+// w := strconv.FormatFloat(num-float64(i), 'f', int(v), 64) // then parse backwards on string until no more zeros....
+// f := strconv.FormatFloat(n, 'f', int(v), 64) // then turn everything after decimal into an int64
+// t := strconv.FormatFloat(n, 'f', int(v), 64) // then parse backwards on string until no more zeros....
+//
+//
+//
+// General Inclusion Rules
+// - v will always be available inherently
+// - all require n
+// - w requires i
+//
+
+// W returns the number of visible fraction digits in N, without trailing zeros.
+func W(n float64, v uint64) (w int64) {
+
+	s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64)
+
+	// will either be '0' or '0.xxxx', so if 1 then w will be zero
+	// otherwise need to parse
+	if len(s) != 1 {
+
+		s = s[2:]
+		end := len(s) - 1
+
+		for i := end; i >= 0; i-- {
+			if s[i] != '0' {
+				end = i + 1
+				break
+			}
+		}
+
+		w = int64(len(s[:end]))
+	}
+
+	return
+}
+
+// F returns the visible fractional digits in N, with trailing zeros.
+func F(n float64, v uint64) (f int64) { + + s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64) + + // s will either be '0' or '0.xxxx', so if its length is 1 then f will be zero + // otherwise need to parse + if len(s) != 1 { + + // ignoring error, because it can't fail as we generated + // the string internally from a real number + f, _ = strconv.ParseInt(s[2:], 10, 64) + } + + return +} + +// T returns the visible fractional digits in N, without trailing zeros. +func T(n float64, v uint64) (t int64) { + + s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64) + + // s will either be '0' or '0.xxxx', so if its length is 1 then t will be zero + // otherwise need to parse + if len(s) != 1 { + + s = s[2:] + end := 0 + + // walk backwards from the last digit, skipping trailing zeros + for i := len(s) - 1; i >= 0; i-- { + if s[i] != '0' { + end = i + 1 + break + } + } + + // ignoring error, because it can't fail as we generated + // the string internally from a real number + t, _ = strconv.ParseInt(s[:end], 10, 64) + } + + return +} diff --git a/vendor/github.com/go-playground/universal-translator/.gitignore b/vendor/github.com/go-playground/universal-translator/.gitignore new file mode 100644 index 0000000..2661785 --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof \ No newline at end of file diff --git a/vendor/github.com/go-playground/universal-translator/LICENSE b/vendor/github.com/go-playground/universal-translator/LICENSE new file mode 100644 index 0000000..8d8aba1 --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Go Playground + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE.
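To make the n/i/v/w/f/t operands from the precision notes above concrete, here is a minimal sketch using the exported W, F and T helpers from the vendored `github.com/go-playground/locales` package shown above (the expected output assumes the corrected trailing-zero scan), for n = 2.30 declared with v = 2:

```go
package main

import (
	"fmt"

	"github.com/go-playground/locales"
)

func main() {
	// n = 2.30 with a declared precision of v = 2 visible fraction digits ("30").
	n, v := 2.30, uint64(2)

	fmt.Println(locales.W(n, v)) // 1  -> fraction digits without trailing zeros ("3")
	fmt.Println(locales.F(n, v)) // 30 -> fraction digits with trailing zeros, as an int64
	fmt.Println(locales.T(n, v)) // 3  -> fraction digits without trailing zeros, as an int64
}
```

This is why the library insists on an explicit precision: the same float declared with v = 1 carries the operands of "2.3" rather than "2.30", and some locales select a different plural rule for the two forms.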
diff --git a/vendor/github.com/go-playground/universal-translator/README.md b/vendor/github.com/go-playground/universal-translator/README.md new file mode 100644 index 0000000..24aef15 --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/README.md @@ -0,0 +1,90 @@ +## universal-translator + +![Project status](https://img.shields.io/badge/version-0.16.0-green.svg) +[![Build Status](https://semaphoreci.com/api/v1/joeybloggs/universal-translator/branches/master/badge.svg)](https://semaphoreci.com/joeybloggs/universal-translator) +[![Coverage Status](https://coveralls.io/repos/github/go-playground/universal-translator/badge.svg)](https://coveralls.io/github/go-playground/universal-translator) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/universal-translator)](https://goreportcard.com/report/github.com/go-playground/universal-translator) +[![GoDoc](https://godoc.org/github.com/go-playground/universal-translator?status.svg)](https://godoc.org/github.com/go-playground/universal-translator) +![License](https://img.shields.io/dub/l/vibe-d.svg) +[![Gitter](https://badges.gitter.im/go-playground/universal-translator.svg)](https://gitter.im/go-playground/universal-translator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) + +Universal Translator is an i18n Translator for Go/Golang using CLDR data + pluralization rules + +Why another i18n library? +-------------------------- +Because none of the plural rules out there seem to be correct, including those in the previous implementation of this package, +I took it upon myself to create [locales](https://github.com/go-playground/locales) for everyone to use; this package +is a thin wrapper around [locales](https://github.com/go-playground/locales) in order to store and translate text for +use in your applications. + +Features +-------- +- [x] Rules generated from the [CLDR](http://cldr.unicode.org/index/downloads) data, v30.0.3 +- [x] Contains Cardinal, Ordinal and Range Plural Rules +- [x] Contains Month, Weekday and Timezone translations built in +- [x] Contains Date & Time formatting functions +- [x] Contains Number, Currency, Accounting and Percent formatting functions +- [x] Supports the "Gregorian" calendar only (my time isn't unlimited, had to draw the line somewhere) +- [x] Supports loading translations from files +- [x] Exporting translations to file(s), mainly for getting them professionally translated +- [ ] Code Generation for translation files -> Go code, i.e. after it has been professionally translated +- [ ] Tests for all languages, I need help with this, please see [here](https://github.com/go-playground/locales/issues/1) + +Installation +----------- + +Use go get + +```shell
go get github.com/go-playground/universal-translator
``` + +Usage & Documentation +------- + +Please see https://godoc.org/github.com/go-playground/universal-translator for usage docs + +##### Examples: + +- [Basic](https://github.com/go-playground/universal-translator/tree/master/examples/basic) +- [Full - no files](https://github.com/go-playground/universal-translator/tree/master/examples/full-no-files) +- [Full - with files](https://github.com/go-playground/universal-translator/tree/master/examples/full-with-files) + +File formatting +-------------- +All types (plain substitution, Cardinal, Ordinal and Range translations) can be contained within the same file(s); +they are only separated for easy viewing.
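A minimal runtime sketch (assuming the vendored `universal-translator` package and the `en` locale from `github.com/go-playground/locales/en`) shows how a cardinal translation like the `days-left` JSON example just below is registered and looked up:

```go
package main

import (
	"fmt"

	"github.com/go-playground/locales"
	"github.com/go-playground/locales/en"
	ut "github.com/go-playground/universal-translator"
)

func main() {
	e := en.New() // the 'en' locale, generated from CLDR data
	uni := ut.New(e, e)
	trans, _ := uni.GetTranslator("en")

	// A plain substitution translation.
	_ = trans.Add("welcome", "Welcome {0}!", false)

	// Cardinal translations: one entry per plural rule the locale uses.
	_ = trans.AddCardinal("days-left", "You have {0} day left.", locales.PluralRuleOne, false)
	_ = trans.AddCardinal("days-left", "You have {0} days left.", locales.PluralRuleOther, false)

	s, _ := trans.T("welcome", "Joe")
	fmt.Println(s) // Welcome Joe!

	s, _ = trans.C("days-left", 1, 0, "1") // num = 1 with 0 fraction digits selects rule One in 'en'
	fmt.Println(s)                         // You have 1 day left.
}
```

The same translations can instead be loaded from JSON files via the Import/ImportByReader functions added later in this diff, which is exactly what the file format described here encodes.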
+ +##### Examples: + +- [Formats](https://github.com/go-playground/universal-translator/tree/master/examples/file-formats) + +##### Basic Makeup +NOTE: not all fields are needed for all translation types, see [examples](https://github.com/go-playground/universal-translator/tree/master/examples/file-formats) +```json +{ + "locale": "en", + "key": "days-left", + "trans": "You have {0} day left.", + "type": "Cardinal", + "rule": "One", + "override": false +} +``` +|Field|Description| +|---|---| +|locale|The locale the translation is for.| +|key|The translation key that will be used to store and lookup each translation; normally it is a string or integer.| +|trans|The actual translation text.| +|type|The type of translation: Cardinal, Ordinal, Range or "" for a plain substitution (not required to be defined if plain is used)| +|rule|The plural rule the translation applies to, eg. One, Two, Few, Many or Other (not required to be defined if plain is used)| +|override|If you wish to override an existing translation that has already been registered, set this to 'true'. 99% of the time there is no need to define it.| + +Help With Tests +--------------- +To anyone interested in helping or contributing, I sure could use some help creating tests for each language. +Please see issue [here](https://github.com/go-playground/locales/issues/1) for details. + +License +------ +Distributed under MIT License, please see license file in code for more details. diff --git a/vendor/github.com/go-playground/universal-translator/errors.go b/vendor/github.com/go-playground/universal-translator/errors.go new file mode 100644 index 0000000..38b163b --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/errors.go @@ -0,0 +1,148 @@ +package ut + +import ( + "errors" + "fmt" + + "github.com/go-playground/locales" +) + +var ( + // ErrUnknowTranslation indicates the translation could not be found + ErrUnknowTranslation = errors.New("Unknown Translation") +) + +var _ error = new(ErrConflictingTranslation) +var _ error = new(ErrRangeTranslation) +var _ error = new(ErrOrdinalTranslation) +var _ error = new(ErrCardinalTranslation) +var _ error = new(ErrMissingPluralTranslation) +var _ error = new(ErrExistingTranslator) + +// ErrExistingTranslator is the error representing a conflicting translator +type ErrExistingTranslator struct { + locale string +} + +// Error returns ErrExistingTranslator's internal error text +func (e *ErrExistingTranslator) Error() string { + return fmt.Sprintf("error: conflicting translator for locale '%s'", e.locale) +} + +// ErrConflictingTranslation is the error representing a conflicting translation +type ErrConflictingTranslation struct { + locale string + key interface{} + rule locales.PluralRule + text string +} + +// Error returns ErrConflictingTranslation's internal error text +func (e *ErrConflictingTranslation) Error() string { + + if _, ok := e.key.(string); !ok { + return fmt.Sprintf("error: conflicting key '%#v' rule '%s' with text '%s' for locale '%s', value being ignored", e.key, e.rule, e.text, e.locale) + } + + return fmt.Sprintf("error: conflicting key '%s' rule '%s' with text '%s' for locale '%s', value being ignored", e.key, e.rule, e.text, e.locale) +} + +// ErrRangeTranslation is the error representing a range translation error +type ErrRangeTranslation struct { + text string +} + +// Error returns ErrRangeTranslation's internal error text +func (e *ErrRangeTranslation) Error() string { + return e.text +} + +// ErrOrdinalTranslation is the error
representing an ordinal translation error +type ErrOrdinalTranslation struct { + text string +} + +// Error returns ErrOrdinalTranslation's internal error text +func (e *ErrOrdinalTranslation) Error() string { + return e.text +} + +// ErrCardinalTranslation is the error representing a cardinal translation error +type ErrCardinalTranslation struct { + text string +} + +// Error returns ErrCardinalTranslation's internal error text +func (e *ErrCardinalTranslation) Error() string { + return e.text +} + +// ErrMissingPluralTranslation is the error signifying a missing translation given +// the locales plural rules. +type ErrMissingPluralTranslation struct { + locale string + key interface{} + rule locales.PluralRule + translationType string +} + +// Error returns ErrMissingPluralTranslation's internal error text +func (e *ErrMissingPluralTranslation) Error() string { + + if _, ok := e.key.(string); !ok { + return fmt.Sprintf("error: missing '%s' plural rule '%s' for translation with key '%#v' and locale '%s'", e.translationType, e.rule, e.key, e.locale) + } + + return fmt.Sprintf("error: missing '%s' plural rule '%s' for translation with key '%s' and locale '%s'", e.translationType, e.rule, e.key, e.locale) +} + +// ErrMissingBracket is the error representing a missing bracket in a translation +// eg. This is a {0 <-- missing ending '}' +type ErrMissingBracket struct { + locale string + key interface{} + text string +} + +// Error returns ErrMissingBracket error message +func (e *ErrMissingBracket) Error() string { + return fmt.Sprintf("error: missing bracket '{}', in translation. locale: '%s' key: '%v' text: '%s'", e.locale, e.key, e.text) +} + +// ErrBadParamSyntax is the error representing a bad parameter definition in a translation +// eg. This is a {must-be-int} +type ErrBadParamSyntax struct { + locale string + param string + key interface{} + text string +} + +// Error returns ErrBadParamSyntax error message +func (e *ErrBadParamSyntax) Error() string { + return fmt.Sprintf("error: bad parameter syntax, missing parameter '%s' in translation. locale: '%s' key: '%v' text: '%s'", e.param, e.locale, e.key, e.text) +} + +// import/export errors + +// ErrMissingLocale is the error representing an expected locale that could +// not be found aka locale not registered with the UniversalTranslator Instance +type ErrMissingLocale struct { + locale string +} + +// Error returns ErrMissingLocale's internal error text +func (e *ErrMissingLocale) Error() string { + return fmt.Sprintf("error: locale '%s' not registered.", e.locale) +} + +// ErrBadPluralDefinition is the error representing an incorrect plural definition +// usually found within translations defined within files during the import process. 
+type ErrBadPluralDefinition struct { + tl translation +} + +// Error returns ErrBadPluralDefinition's internal error text +func (e *ErrBadPluralDefinition) Error() string { + return fmt.Sprintf("error: bad plural definition '%#v'", e.tl) +} diff --git a/vendor/github.com/go-playground/universal-translator/import_export.go b/vendor/github.com/go-playground/universal-translator/import_export.go new file mode 100644 index 0000000..7bd76f2 --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/import_export.go @@ -0,0 +1,274 @@ +package ut + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "io" + + "github.com/go-playground/locales" +) + +type translation struct { + Locale string `json:"locale"` + Key interface{} `json:"key"` // either string or integer + Translation string `json:"trans"` + PluralType string `json:"type,omitempty"` + PluralRule string `json:"rule,omitempty"` + OverrideExisting bool `json:"override,omitempty"` +} + +const ( + cardinalType = "Cardinal" + ordinalType = "Ordinal" + rangeType = "Range" +) + +// ImportExportFormat is the format of the file import or export +type ImportExportFormat uint8 + +// supported Export Formats +const ( + FormatJSON ImportExportFormat = iota +) + +// Export writes the translations out to a file on disk. +// +// NOTE: this currently only works with string or int translation keys. +func (t *UniversalTranslator) Export(format ImportExportFormat, dirname string) error { + + _, err := os.Stat(dirname) + if err != nil { + + if !os.IsNotExist(err) { + return err + } + + if err = os.MkdirAll(dirname, 0744); err != nil { + return err + } + } + + // build up translations + var trans []translation + var b []byte + var ext string + + for _, locale := range t.translators { + + for k, v := range locale.(*translator).translations { + trans = append(trans, translation{ + Locale: locale.Locale(), + Key: k, + Translation: v.text, + }) + } + + for k, pluralTrans := range locale.(*translator).cardinalTanslations { + + for i, plural := range pluralTrans { + + // the slice has a slot for every plural rule, + // but not all rules are set for all languages. + if plural == nil { + continue + } + + trans = append(trans, translation{ + Locale: locale.Locale(), + Key: k.(string), + Translation: plural.text, + PluralType: cardinalType, + PluralRule: locales.PluralRule(i).String(), + }) + } + } + + for k, pluralTrans := range locale.(*translator).ordinalTanslations { + + for i, plural := range pluralTrans { + + // the slice has a slot for every plural rule, + // but not all rules are set for all languages. + if plural == nil { + continue + } + + trans = append(trans, translation{ + Locale: locale.Locale(), + Key: k.(string), + Translation: plural.text, + PluralType: ordinalType, + PluralRule: locales.PluralRule(i).String(), + }) + } + } + + for k, pluralTrans := range locale.(*translator).rangeTanslations { + + for i, plural := range pluralTrans { + + // the slice has a slot for every plural rule, + // but not all rules are set for all languages.
+ if plural == nil { + continue + } + + trans = append(trans, translation{ + Locale: locale.Locale(), + Key: k.(string), + Translation: plural.text, + PluralType: rangeType, + PluralRule: locales.PluralRule(i).String(), + }) + } + } + + switch format { + case FormatJSON: + b, err = json.MarshalIndent(trans, "", " ") + ext = ".json" + } + + if err != nil { + return err + } + + err = ioutil.WriteFile(filepath.Join(dirname, fmt.Sprintf("%s%s", locale.Locale(), ext)), b, 0644) + if err != nil { + return err + } + + trans = trans[0:0] + } + + return nil +} + +// Import reads the translations out of a file or directory on disk. +// +// NOTE: this currently only works with string or int translation keys. +func (t *UniversalTranslator) Import(format ImportExportFormat, dirnameOrFilename string) error { + + fi, err := os.Stat(dirnameOrFilename) + if err != nil { + return err + } + + processFn := func(filename string) error { + + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + + return t.ImportByReader(format, f) + } + + if !fi.IsDir() { + return processFn(dirnameOrFilename) + } + + // recursively go through directory + walker := func(path string, info os.FileInfo, err error) error { + + if info.IsDir() { + return nil + } + + switch format { + case FormatJSON: + // skip non JSON files + if filepath.Ext(info.Name()) != ".json" { + return nil + } + } + + return processFn(path) + } + + return filepath.Walk(dirnameOrFilename, walker) +} + +// ImportByReader imports the translations found within the contents read from the supplied reader. +// +// NOTE: generally used when assets have been embedded into the binary and are already in memory. +func (t *UniversalTranslator) ImportByReader(format ImportExportFormat, reader io.Reader) error { + + b, err := ioutil.ReadAll(reader) + if err != nil { + return err + } + + var trans []translation + + switch format { + case FormatJSON: + err = json.Unmarshal(b, &trans) + } + + if err != nil { + return err + } + + for _, tl := range trans { + + locale, found := t.FindTranslator(tl.Locale) + if !found { + return &ErrMissingLocale{locale: tl.Locale} + } + + pr := stringToPR(tl.PluralRule) + + if pr == locales.PluralRuleUnknown { + + err = locale.Add(tl.Key, tl.Translation, tl.OverrideExisting) + if err != nil { + return err + } + + continue + } + + switch tl.PluralType { + case cardinalType: + err = locale.AddCardinal(tl.Key, tl.Translation, pr, tl.OverrideExisting) + case ordinalType: + err = locale.AddOrdinal(tl.Key, tl.Translation, pr, tl.OverrideExisting) + case rangeType: + err = locale.AddRange(tl.Key, tl.Translation, pr, tl.OverrideExisting) + default: + return &ErrBadPluralDefinition{tl: tl} + } + + if err != nil { + return err + } + } + + return nil +} + +func stringToPR(s string) locales.PluralRule { + + switch s { + case "One": + return locales.PluralRuleOne + case "Two": + return locales.PluralRuleTwo + case "Few": + return locales.PluralRuleFew + case "Many": + return locales.PluralRuleMany + case "Other": + return locales.PluralRuleOther + default: + return locales.PluralRuleUnknown + } + +} diff --git a/vendor/github.com/go-playground/universal-translator/logo.png b/vendor/github.com/go-playground/universal-translator/logo.png new file mode 100644 index 0000000..a37aa8c Binary files /dev/null and b/vendor/github.com/go-playground/universal-translator/logo.png differ diff --git a/vendor/github.com/go-playground/universal-translator/translator.go b/vendor/github.com/go-playground/universal-translator/translator.go new
file mode 100644 index 0000000..cfafce8 --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/translator.go @@ -0,0 +1,420 @@ +package ut + +import ( + "fmt" + "strconv" + "strings" + + "github.com/go-playground/locales" +) + +const ( + paramZero = "{0}" + paramOne = "{1}" + unknownTranslation = "" +) + +// Translator is a universal translator's +// per-locale translator instance, a thin wrapper +// around a locales.Translator instance providing +// some extra functionality +type Translator interface { + locales.Translator + + // adds a normal translation for a particular language/locale + // {#} is the only replacement type accepted and may be used ad infinitum + // eg. one: '{0} day left' other: '{0} days left' + Add(key interface{}, text string, override bool) error + + // adds a cardinal plural translation for a particular language/locale + // {0} is the only replacement type accepted and only one variable is accepted as + // multiple cannot be used for a plural rule determination, unless it is a range; + // see AddRange below. + // eg. in locale 'en' one: '{0} day left' other: '{0} days left' + AddCardinal(key interface{}, text string, rule locales.PluralRule, override bool) error + + // adds an ordinal plural translation for a particular language/locale + // {0} is the only replacement type accepted and only one variable is accepted as + // multiple cannot be used for a plural rule determination, unless it is a range; + // see AddRange below. + // eg. in locale 'en' one: '{0}st day of spring' other: '{0}nd day of spring' + // - 1st, 2nd, 3rd... + AddOrdinal(key interface{}, text string, rule locales.PluralRule, override bool) error + + // adds a range plural translation for a particular language/locale + // {0} and {1} are the only replacement types accepted and only these are accepted. + // eg. in locale 'nl' one: '{0}-{1} day left' other: '{0}-{1} days left' + AddRange(key interface{}, text string, rule locales.PluralRule, override bool) error + + // creates the translation for the locale given the 'key' and params passed in + T(key interface{}, params ...string) (string, error) + + // creates the cardinal translation for the locale given the 'key', 'num' and 'digit' arguments + // and param passed in + C(key interface{}, num float64, digits uint64, param string) (string, error) + + // creates the ordinal translation for the locale given the 'key', 'num' and 'digit' arguments + // and param passed in + O(key interface{}, num float64, digits uint64, param string) (string, error) + + // creates the range translation for the locale given the 'key', 'num1', 'digit1', 'num2' and + // 'digit2' arguments and 'param1' and 'param2' passed in + R(key interface{}, num1 float64, digits1 uint64, num2 float64, digits2 uint64, param1, param2 string) (string, error) + + // VerifyTranslations checks to ensure that no plural rules have been + // missed within the translations.
+ VerifyTranslations() error +} + +var _ Translator = new(translator) +var _ locales.Translator = new(translator) + +type translator struct { + locales.Translator + translations map[interface{}]*transText + cardinalTanslations map[interface{}][]*transText // array index is mapped to locales.PluralRule index + the locales.PluralRuleUnknown + ordinalTanslations map[interface{}][]*transText + rangeTanslations map[interface{}][]*transText +} + +type transText struct { + text string + indexes []int +} + +func newTranslator(trans locales.Translator) Translator { + return &translator{ + Translator: trans, + translations: make(map[interface{}]*transText), // translation text broken up by byte index + cardinalTanslations: make(map[interface{}][]*transText), + ordinalTanslations: make(map[interface{}][]*transText), + rangeTanslations: make(map[interface{}][]*transText), + } +} + +// Add adds a normal translation for a particular language/locale +// {#} is the only replacement type accepted and may be used ad infinitum +// eg. one: '{0} day left' other: '{0} days left' +func (t *translator) Add(key interface{}, text string, override bool) error { + + if _, ok := t.translations[key]; ok && !override { + return &ErrConflictingTranslation{locale: t.Locale(), key: key, text: text} + } + + lb := strings.Count(text, "{") + rb := strings.Count(text, "}") + + if lb != rb { + return &ErrMissingBracket{locale: t.Locale(), key: key, text: text} + } + + trans := &transText{ + text: text, + } + + var idx int + + for i := 0; i < lb; i++ { + s := "{" + strconv.Itoa(i) + "}" + idx = strings.Index(text, s) + if idx == -1 { + return &ErrBadParamSyntax{locale: t.Locale(), param: s, key: key, text: text} + } + + trans.indexes = append(trans.indexes, idx) + trans.indexes = append(trans.indexes, idx+len(s)) + } + + t.translations[key] = trans + + return nil +} + +// AddCardinal adds a cardinal plural translation for a particular language/locale +// {0} is the only replacement type accepted and only one variable is accepted as +// multiple cannot be used for a plural rule determination, unless it is a range; +// see AddRange below. +// eg. in locale 'en' one: '{0} day left' other: '{0} days left' +func (t *translator) AddCardinal(key interface{}, text string, rule locales.PluralRule, override bool) error { + + var verified bool + + // verify plural rule exists for locale + for _, pr := range t.PluralsCardinal() { + if pr == rule { + verified = true + break + } + } + + if !verified { + return &ErrCardinalTranslation{text: fmt.Sprintf("error: cardinal plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)} + } + + tarr, ok := t.cardinalTanslations[key] + if ok { + // verify not adding a conflicting record + if len(tarr) > 0 && tarr[rule] != nil && !override { + return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text} + } + + } else { + tarr = make([]*transText, 7, 7) + t.cardinalTanslations[key] = tarr + } + + trans := &transText{ + text: text, + indexes: make([]int, 2, 2), + } + + tarr[rule] = trans + + idx := strings.Index(text, paramZero) + if idx == -1 { + tarr[rule] = nil + return &ErrCardinalTranslation{text: fmt.Sprintf("error: parameter '%s' not found, may want to use 'Add' instead of 'AddCardinal'.
locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)} + } + + trans.indexes[0] = idx + trans.indexes[1] = idx + len(paramZero) + + return nil +} + +// AddOrdinal adds an ordinal plural translation for a particular language/locale +// {0} is the only replacement type accepted and only one variable is accepted as +// multiple cannot be used for a plural rule determination, unless it is a range; +// see AddRange below. +// eg. in locale 'en' one: '{0}st day of spring' other: '{0}nd day of spring' - 1st, 2nd, 3rd... +func (t *translator) AddOrdinal(key interface{}, text string, rule locales.PluralRule, override bool) error { + + var verified bool + + // verify plural rule exists for locale + for _, pr := range t.PluralsOrdinal() { + if pr == rule { + verified = true + break + } + } + + if !verified { + return &ErrOrdinalTranslation{text: fmt.Sprintf("error: ordinal plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)} + } + + tarr, ok := t.ordinalTanslations[key] + if ok { + // verify not adding a conflicting record + if len(tarr) > 0 && tarr[rule] != nil && !override { + return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text} + } + + } else { + tarr = make([]*transText, 7, 7) + t.ordinalTanslations[key] = tarr + } + + trans := &transText{ + text: text, + indexes: make([]int, 2, 2), + } + + tarr[rule] = trans + + idx := strings.Index(text, paramZero) + if idx == -1 { + tarr[rule] = nil + return &ErrOrdinalTranslation{text: fmt.Sprintf("error: parameter '%s' not found, may want to use 'Add' instead of 'AddOrdinal'. locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)} + } + + trans.indexes[0] = idx + trans.indexes[1] = idx + len(paramZero) + + return nil +} + +// AddRange adds a range plural translation for a particular language/locale +// {0} and {1} are the only replacement types accepted and only these are accepted. +// eg. in locale 'nl' one: '{0}-{1} day left' other: '{0}-{1} days left' +func (t *translator) AddRange(key interface{}, text string, rule locales.PluralRule, override bool) error { + + var verified bool + + // verify plural rule exists for locale + for _, pr := range t.PluralsRange() { + if pr == rule { + verified = true + break + } + } + + if !verified { + return &ErrRangeTranslation{text: fmt.Sprintf("error: range plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)} + } + + tarr, ok := t.rangeTanslations[key] + if ok { + // verify not adding a conflicting record + if len(tarr) > 0 && tarr[rule] != nil && !override { + return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text} + } + + } else { + tarr = make([]*transText, 7, 7) + t.rangeTanslations[key] = tarr + } + + trans := &transText{ + text: text, + indexes: make([]int, 4, 4), + } + + tarr[rule] = trans + + idx := strings.Index(text, paramZero) + if idx == -1 { + tarr[rule] = nil + return &ErrRangeTranslation{text: fmt.Sprintf("error: parameter '%s' not found, are you sure you're adding a Range Translation? locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)} + } + + trans.indexes[0] = idx + trans.indexes[1] = idx + len(paramZero) + + idx = strings.Index(text, paramOne) + if idx == -1 { + tarr[rule] = nil + return &ErrRangeTranslation{text: fmt.Sprintf("error: parameter '%s' not found, a Range Translation requires two parameters. 
locale: '%s' key: '%v' text: '%s'", paramOne, t.Locale(), key, text)} + } + + trans.indexes[2] = idx + trans.indexes[3] = idx + len(paramOne) + + return nil +} + +// T creates the translation for the locale given the 'key' and params passed in +func (t *translator) T(key interface{}, params ...string) (string, error) { + + trans, ok := t.translations[key] + if !ok { + return unknownTranslation, ErrUnknowTranslation + } + + b := make([]byte, 0, 64) + + var start, end, count int + + for i := 0; i < len(trans.indexes); i++ { + end = trans.indexes[i] + b = append(b, trans.text[start:end]...) + b = append(b, params[count]...) + i++ + start = trans.indexes[i] + count++ + } + + b = append(b, trans.text[start:]...) + + return string(b), nil +} + +// C creates the cardinal translation for the locale given the 'key', 'num' and 'digit' arguments and param passed in +func (t *translator) C(key interface{}, num float64, digits uint64, param string) (string, error) { + + tarr, ok := t.cardinalTanslations[key] + if !ok { + return unknownTranslation, ErrUnknowTranslation + } + + rule := t.CardinalPluralRule(num, digits) + + trans := tarr[rule] + + b := make([]byte, 0, 64) + b = append(b, trans.text[:trans.indexes[0]]...) + b = append(b, param...) + b = append(b, trans.text[trans.indexes[1]:]...) + + return string(b), nil +} + +// O creates the ordinal translation for the locale given the 'key', 'num' and 'digit' arguments and param passed in +func (t *translator) O(key interface{}, num float64, digits uint64, param string) (string, error) { + + tarr, ok := t.ordinalTanslations[key] + if !ok { + return unknownTranslation, ErrUnknowTranslation + } + + rule := t.OrdinalPluralRule(num, digits) + + trans := tarr[rule] + + b := make([]byte, 0, 64) + b = append(b, trans.text[:trans.indexes[0]]...) + b = append(b, param...) + b = append(b, trans.text[trans.indexes[1]:]...) + + return string(b), nil +} + +// R creates the range translation for the locale given the 'key', 'num1', 'digit1', 'num2' and 'digit2' arguments +// and 'param1' and 'param2' passed in +func (t *translator) R(key interface{}, num1 float64, digits1 uint64, num2 float64, digits2 uint64, param1, param2 string) (string, error) { + + tarr, ok := t.rangeTanslations[key] + if !ok { + return unknownTranslation, ErrUnknowTranslation + } + + rule := t.RangePluralRule(num1, digits1, num2, digits2) + + trans := tarr[rule] + + b := make([]byte, 0, 64) + b = append(b, trans.text[:trans.indexes[0]]...) + b = append(b, param1...) + b = append(b, trans.text[trans.indexes[1]:trans.indexes[2]]...) + b = append(b, param2...) + b = append(b, trans.text[trans.indexes[3]:]...) + + return string(b), nil +} + +// VerifyTranslations checks to ensure that no plural rules have been +// missed within the translations.
+func (t *translator) VerifyTranslations() error { + + for k, v := range t.cardinalTanslations { + + for _, rule := range t.PluralsCardinal() { + + if v[rule] == nil { + return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "plural", rule: rule, key: k} + } + } + } + + for k, v := range t.ordinalTanslations { + + for _, rule := range t.PluralsOrdinal() { + + if v[rule] == nil { + return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "ordinal", rule: rule, key: k} + } + } + } + + for k, v := range t.rangeTanslations { + + for _, rule := range t.PluralsRange() { + + if v[rule] == nil { + return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "range", rule: rule, key: k} + } + } + } + + return nil +} diff --git a/vendor/github.com/go-playground/universal-translator/universal_translator.go b/vendor/github.com/go-playground/universal-translator/universal_translator.go new file mode 100644 index 0000000..dbf707f --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/universal_translator.go @@ -0,0 +1,113 @@ +package ut + +import ( + "strings" + + "github.com/go-playground/locales" +) + +// UniversalTranslator holds all locale & translation data +type UniversalTranslator struct { + translators map[string]Translator + fallback Translator +} + +// New returns a new UniversalTranslator instance set with +// the fallback locale and locales it should support +func New(fallback locales.Translator, supportedLocales ...locales.Translator) *UniversalTranslator { + + t := &UniversalTranslator{ + translators: make(map[string]Translator), + } + + for _, v := range supportedLocales { + + trans := newTranslator(v) + t.translators[strings.ToLower(trans.Locale())] = trans + + if fallback.Locale() == v.Locale() { + t.fallback = trans + } + } + + if t.fallback == nil && fallback != nil { + t.fallback = newTranslator(fallback) + } + + return t +} + +// FindTranslator tries to find a Translator based on an array of locales +// and returns the first one it can find, otherwise returns the +// fallback translator.
+func (t *UniversalTranslator) FindTranslator(locales ...string) (trans Translator, found bool) { + + for _, locale := range locales { + + if trans, found = t.translators[strings.ToLower(locale)]; found { + return + } + } + + return t.fallback, false +} + +// GetTranslator returns the specified translator for the given locale, +// or fallback if not found +func (t *UniversalTranslator) GetTranslator(locale string) (trans Translator, found bool) { + + if trans, found = t.translators[strings.ToLower(locale)]; found { + return + } + + return t.fallback, false +} + +// GetFallback returns the fallback locale +func (t *UniversalTranslator) GetFallback() Translator { + return t.fallback +} + +// AddTranslator adds the supplied translator, if it already exists the override param +// will be checked and if false an error will be returned, otherwise the translator will be +// overridden; if the fallback matches the supplied translator it will be overridden as well +// NOTE: this is normally only used when translator is embedded within a library +func (t *UniversalTranslator) AddTranslator(translator locales.Translator, override bool) error { + + lc := strings.ToLower(translator.Locale()) + _, ok := t.translators[lc] + if ok && !override { + return &ErrExistingTranslator{locale: translator.Locale()} + } + + trans := newTranslator(translator) + + if t.fallback.Locale() == translator.Locale() { + + // because it's optional to have a fallback, I don't impose that limitation + // don't know why you wouldn't but... + if !override { + return &ErrExistingTranslator{locale: translator.Locale()} + } + + t.fallback = trans + } + + t.translators[lc] = trans + + return nil +} + +// VerifyTranslations runs through all locales and identifies any issues +// eg. missing plural rules for a locale +func (t *UniversalTranslator) VerifyTranslations() (err error) { + + for _, trans := range t.translators { + err = trans.VerifyTranslations() + if err != nil { + return + } + } + + return +} diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml new file mode 100644 index 0000000..d8156a6 --- /dev/null +++ b/vendor/github.com/google/uuid/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.4.3 + - 1.5.3 + - tip + +script: + - go test -v ./... diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md new file mode 100644 index 0000000..04fdf09 --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -0,0 +1,10 @@ +# How to contribute + +We definitely welcome patches and contribution to this project! + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS new file mode 100644 index 0000000..b4bb97f --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTORS @@ -0,0 +1,9 @@ +Paul Borman +bmatsuo +shawnps +theory +jboverfelt +dsymonds +cd1 +wallclockbuilder +dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE new file mode 100644 index 0000000..5dc6826 --- /dev/null +++ b/vendor/github.com/google/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md new file mode 100644 index 0000000..9d92c11 --- /dev/null +++ b/vendor/github.com/google/uuid/README.md @@ -0,0 +1,19 @@ +# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +The uuid package generates and inspects UUIDs based on +[RFC 4122](http://tools.ietf.org/html/rfc4122) +and DCE 1.1: Authentication and Security Services. + +This package is based on the github.com/pborman/uuid package (previously named +code.google.com/p/go-uuid). It differs from these earlier packages in that +a UUID is a 16 byte array rather than a byte slice. One loss due to this +change is the ability to represent an invalid UUID (vs a NIL UUID). + +###### Install +`go get github.com/google/uuid` + +###### Documentation +[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) + +Full `go doc` style documentation for the package can be viewed online without +installing this package by using the GoDoc site here: +http://godoc.org/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go new file mode 100644 index 0000000..fa820b9 --- /dev/null +++ b/vendor/github.com/google/uuid/dce.go @@ -0,0 +1,80 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. 
+// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) (UUID, error) { + uuid, err := NewUUID() + if err == nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid, err +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCESecurity(Person, uint32(os.Getuid())) +func NewDCEPerson() (UUID, error) { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCESecurity(Group, uint32(os.Getgid())) +func NewDCEGroup() (UUID, error) { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID. Domains are only defined +// for Version 2 UUIDs. +func (uuid UUID) Domain() Domain { + return Domain(uuid[9]) +} + +// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 +// UUIDs. +func (uuid UUID) ID() uint32 { + return binary.BigEndian.Uint32(uuid[0:4]) +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go new file mode 100644 index 0000000..5b8a4b9 --- /dev/null +++ b/vendor/github.com/google/uuid/doc.go @@ -0,0 +1,12 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uuid generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to +// maps or compared directly. +package uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go new file mode 100644 index 0000000..b174616 --- /dev/null +++ b/vendor/github.com/google/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known namespace IDs and UUIDs +var ( + NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) + Nil UUID // empty UUID, all zeros +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. 
+func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space[:]) + h.Write(data) + s := h.Sum(nil) + var uuid UUID + copy(uuid[:], s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go new file mode 100644 index 0000000..7f9e0c6 --- /dev/null +++ b/vendor/github.com/google/uuid/marshal.go @@ -0,0 +1,37 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "fmt" + +// MarshalText implements encoding.TextMarshaler. +func (uuid UUID) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], uuid) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (uuid *UUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err == nil { + *uuid = id + } + return err +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (uuid UUID) MarshalBinary() ([]byte, error) { + return uuid[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (uuid *UUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(uuid[:], data) + return nil +} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go new file mode 100644 index 0000000..3e4e90d --- /dev/null +++ b/vendor/github.com/google/uuid/node.go @@ -0,0 +1,89 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "sync" +) + +var ( + nodeMu sync.Mutex + ifname string // name of interface being used + nodeID [6]byte // hardware for version 1 UUIDs + zeroID [6]byte // nodeID with only 0's +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + iname, addr := getHardwareInterface(name) // null implementation for js + if iname != "" && addr != nil { + ifname = iname + copy(nodeID[:], addr) + return true + } + + // We found no interfaces with a valid hardware address. 
If name + // does not specify a specific interface, generate a random Node ID + // (section 4.1.6) + if name == "" { + randomBits(nodeID[:]) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nid := nodeID + return nid[:] +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + defer nodeMu.Unlock() + nodeMu.Lock() + copy(nodeID[:], id) + ifname = "user" + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + var node [6]byte + copy(node[:], uuid[10:]) + return node[:] +} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go new file mode 100644 index 0000000..24b78ed --- /dev/null +++ b/vendor/github.com/google/uuid/node_js.go @@ -0,0 +1,12 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build js + +package uuid + +// getHardwareInterface returns nil values for the JS version of the code. +// This removes the "net" dependency, because it is not used in the browser. +// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. +func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go new file mode 100644 index 0000000..0cbbcdd --- /dev/null +++ b/vendor/github.com/google/uuid/node_net.go @@ -0,0 +1,33 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !js + +package uuid + +import "net" + +var interfaces []net.Interface // cached list of interfaces + +// getHardwareInterface returns the name and hardware address of interface name. +// If name is "" then the name and hardware address of one of the system's +// interfaces is returned. If no interfaces are found (name does not exist or +// there are no interfaces) then "", nil is returned. +// +// Only addresses of at least 6 bytes are returned. +func getHardwareInterface(name string) (string, []byte) { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil { + return "", nil + } + } + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + return ifs.Name, ifs.HardwareAddr + } + } + return "", nil +} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go new file mode 100644 index 0000000..f326b54 --- /dev/null +++ b/vendor/github.com/google/uuid/sql.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently. +// Currently, database types that map to string and []byte are supported.
Please +// consult database-specific driver documentation for matching types. +func (uuid *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case nil: + return nil + + case string: + // if an empty UUID comes from a table, we return a null UUID + if src == "" { + return nil + } + + // see Parse for required string format + u, err := Parse(src) + if err != nil { + return fmt.Errorf("Scan: %v", err) + } + + *uuid = u + + case []byte: + // if an empty UUID comes from a table, we return a null UUID + if len(src) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(src) != 16 { + return uuid.Scan(string(src)) + } + copy((*uuid)[:], src) + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. +func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go new file mode 100644 index 0000000..e6ef06c --- /dev/null +++ b/vendor/github.com/google/uuid/time.go @@ -0,0 +1,123 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100s of nanoseconds since 15 Oct +// 1582. +type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs +) + +var ( + timeMu sync.Mutex + lasttime uint64 // last time we returned + clockSeq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t to the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. +func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. +func GetTime() (Time, uint16, error) { + defer timeMu.Unlock() + timeMu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. + if clockSeq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clockSeq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence is used, a new +// random clock sequence is generated the first time a clock sequence is +// requested by ClockSequence, GetTime, or NewUUID.
(section 4.2.1.1) +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clockSeq == 0 { + setClockSequence(-1) + } + return int(clockSeq & 0x3fff) +} + +// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + oldSeq := clockSeq + clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if oldSeq != clockSeq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. The time is only defined for version 1 and 2 UUIDs. +func (uuid UUID) Time() Time { + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time) +} + +// ClockSequence returns the clock sequence encoded in uuid. +// The clock sequence is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) ClockSequence() int { + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff +} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go new file mode 100644 index 0000000..5ea6c73 --- /dev/null +++ b/vendor/github.com/google/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. +var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts hex characters x1 and x2 into a byte. 
+func xtob(x1, x2 byte) (byte, bool) { + b1 := xvalues[x1] + b2 := xvalues[x2] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go new file mode 100644 index 0000000..7f3643f --- /dev/null +++ b/vendor/github.com/google/uuid/uuid.go @@ -0,0 +1,198 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID [16]byte + +// A Version represents a UUID's version. +type Version byte + +// A Variant represents a UUID's variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +var rander = rand.Reader // random function + +// Parse decodes s into a UUID or returns an error. Both the UUID form of +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded. +func Parse(s string) (UUID, error) { + var uuid UUID + if len(s) != 36 { + if len(s) != 36+9 { + return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) + } + if strings.ToLower(s[:9]) != "urn:uuid:" { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(s[x], s[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. +func ParseBytes(b []byte) (UUID, error) { + var uuid UUID + if len(b) != 36 { + if len(b) != 36+9 { + return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) + } + if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] + } + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(b[x], b[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// FromBytes creates a new UUID from a byte slice. Returns an error if the slice +// does not have a length of 16. The bytes are copied from the slice. +func FromBytes(b []byte) (uuid UUID, err error) { + err = uuid.UnmarshalBinary(b) + return uuid, err +} + +// Must returns uuid if err is nil and panics otherwise. +func Must(uuid UUID, err error) UUID { + if err != nil { + panic(err) + } + return uuid +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. 
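For orientation, a short usage sketch of the exported parsing helpers in this file; it relies only on API visible in this diff (Parse, Must, and the String, Version, and Variant methods just below):

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Parse accepts both the canonical and the urn:uuid: forms.
	u, err := uuid.Parse("urn:uuid:f47ac10b-58cc-4372-a567-0e02b2c3d479")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.String())  // f47ac10b-58cc-4372-a567-0e02b2c3d479
	fmt.Println(u.Version()) // VERSION_4
	fmt.Println(u.Variant()) // RFC4122

	// Must panics on error; useful for hard-coded values that cannot fail.
	_ = uuid.Must(uuid.Parse("f47ac10b-58cc-4372-a567-0e02b2c3d479"))
}
```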
+func (uuid UUID) URN() string { + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst[:], uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) +} + +// Variant returns the variant encoded in uuid. +func (uuid UUID) Variant() Variant { + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } +} + +// Version returns the version of uuid. +func (uuid UUID) Version() Version { + return Version(uuid[6] >> 4) +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implements io.Reader. +// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. +func SetRand(r io.Reader) { + if r == nil { + rander = rand.Reader + return + } + rander = r +} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go new file mode 100644 index 0000000..199a1ac --- /dev/null +++ b/vendor/github.com/google/uuid/version1.go @@ -0,0 +1,44 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewUUID returns nil. If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewUUID returns nil and an error. +// +// In most cases, New should be used. +func NewUUID() (UUID, error) { + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nodeMu.Unlock() + + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + timeLow := uint32(now & 0xffffffff) + timeMid := uint16((now >> 32) & 0xffff) + timeHi := uint16((now >> 48) & 0x0fff) + timeHi |= 0x1000 // Version 1 + + binary.BigEndian.PutUint32(uuid[0:], timeLow) + binary.BigEndian.PutUint16(uuid[4:], timeMid) + binary.BigEndian.PutUint16(uuid[6:], timeHi) + binary.BigEndian.PutUint16(uuid[8:], seq) + copy(uuid[10:], nodeID[:]) + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go new file mode 100644 index 0000000..84af91c --- /dev/null +++ b/vendor/github.com/google/uuid/version4.go @@ -0,0 +1,38 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import "io" + +// New creates a new random UUID or panics. New is equivalent to +// the expression +// +// uuid.Must(uuid.NewRandom()) +func New() UUID { + return Must(NewRandom()) +} + +// NewRandom returns a Random (Version 4) UUID. +// +// The strength of the UUIDs is based on the strength of the crypto/rand +// package. +// +// A note about uniqueness derived from the UUID Wikipedia entry: +// +// Randomly generated UUIDs have 122 random bits. One's annual risk of being +// hit by a meteorite is estimated to be one chance in 17 billion, that +// means the probability is about 0.00000000006 (6 × 10−11), +// equivalent to the odds of creating a few tens of trillions of UUIDs in a +// year and having one duplicate. +func NewRandom() (UUID, error) { + var uuid UUID + _, err := io.ReadFull(rander, uuid[:]) + if err != nil { + return Nil, err + } + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} diff --git a/vendor/github.com/gwenn/gosqlite/.gitignore b/vendor/github.com/gwenn/gosqlite/.gitignore new file mode 100644 index 0000000..2190738 --- /dev/null +++ b/vendor/github.com/gwenn/gosqlite/.gitignore @@ -0,0 +1,7 @@ +*.[568ao] +[568a].out +_testmain.go +_obj +_test +*.swp +_cgo_* \ No newline at end of file diff --git a/vendor/github.com/gwenn/gosqlite/.travis.yml b/vendor/github.com/gwenn/gosqlite/.travis.yml new file mode 100644 index 0000000..60d8370 --- /dev/null +++ b/vendor/github.com/gwenn/gosqlite/.travis.yml @@ -0,0 +1,12 @@ +sudo: false +language: go +go: + - 1.11.x +install: +- go get github.com/gwenn/yacr +- go get -tags all github.com/gwenn/gosqlite +before_script: + - go get github.com/bmizerany/assert +script: +# - GODEBUG=cgocheck=2 go test -v -tags all github.com/gwenn/gosqlite + - GODEBUG=cgocheck=0 go test -v -tags all github.com/gwenn/gosqlite diff --git a/vendor/github.com/gwenn/gosqlite/LICENSE b/vendor/github.com/gwenn/gosqlite/LICENSE new file mode 100644 index 0000000..7448756 --- /dev/null +++ b/vendor/github.com/gwenn/gosqlite/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gwenn/gosqlite/README.md b/vendor/github.com/gwenn/gosqlite/README.md new file mode 100644 index 0000000..67b26ab --- /dev/null +++ b/vendor/github.com/gwenn/gosqlite/README.md @@ -0,0 +1,169 @@
+[![GoDoc](https://godoc.org/github.com/gwenn/gosqlite?status.svg)](https://godoc.org/github.com/gwenn/gosqlite) [![Build Status][1]][2] [![Go Report Card](https://goreportcard.com/badge/github.com/gwenn/gosqlite)](https://goreportcard.com/report/github.com/gwenn/gosqlite) [![Coverage Status](https://coveralls.io/repos/github/gwenn/gosqlite/badge.svg?branch=master)](https://coveralls.io/github/gwenn/gosqlite?branch=master)
+
+[1]: https://secure.travis-ci.org/gwenn/gosqlite.png
+[2]: http://www.travis-ci.org/gwenn/gosqlite
+
+Yet another SQLite binding based on:
+ - the original [Russ Cox](http://code.google.com/p/gosqlite/) implementation (also [here](https://github.com/rsc/sqlite)),
+ - the [Patrick Crosby](https://github.com/patrickxb/fgosqlite/) fork.
+
+There are two layers:
+ * one matching the SQLite API (with Backup, Blob, user-defined Function/Module, ...).
+ * and another implementing the "database/sql/driver" interface.
+
+### Caveat
+With Go 1.6, some features no longer work without `GODEBUG=cgocheck=0` (see [Cgo major change](https://golang.org/doc/go1.6#cgo)).
+It seems the solution is a global variable/lock (see [here](https://github.com/mattn/go-sqlite3/pull/268)).
+"I would prefer not to" do this.
+
+### Custom build
+If your OS does not bundle the SQLite3 development files (or bundles old ones):
+- download and copy the SQLite3 files
+
+```sh
+$ cp ~/Downloads/sqlite-amalgamation-xxx/sqlite3.{c,h} $GOPATH/src/github.com/gwenn/gosqlite
+```
+
+- patch the sqlite.go file
+
+```
+-#cgo linux freebsd pkg-config: sqlite3
+-#cgo !linux,!freebsd LDFLAGS: -lsqlite3
++#cgo CFLAGS: -I.
++#cgo CFLAGS: -DSQLITE_ENABLE_COLUMN_METADATA=1
+```
+
+### Features (not supported by database/sql/driver):
+
+* ~~Named bind parameters~~.
+* Partial scan: scan values may be partially scanned (by index or name) or skipped/ignored by passing nil pointer(s).
+* Null values: by default, empty strings and zero times are bound to NULL for prepared statements' parameters (no need for NullString or NullTime, but they are still supported).
+* Null values: Stmt.*Scan* methods return the default Go zero value (0, "", ...) for SQL NULL (no need for NullInt64, NullString or NullTime, but they are still supported).
+* Correctly retrieves the time returned by the `select current_timestamp` statement or other expressions: in SQLite, [expression affinity](http://www.sqlite.org/datatype3.html#expraff) is NONE.
+* [Full control over connection pool](https://code.google.com/p/go/issues/detail?id=4805)
+* [No restrictive converter](https://code.google.com/p/go/issues/detail?id=6918)
+* [Support for metadata](https://code.google.com/p/go/issues/detail?id=7408)
+* [Nested transaction support](https://code.google.com/p/go/issues/detail?id=7898)
+
+### Changes:
+
+Open supports flags.
+Conn.Exec handles multiple statements (separated by semicolons) properly.
+Conn.Prepare can optionally bind as well.
+Conn.Prepare can reuse an already prepared Stmt.
+Conn.Close ensures that all dangling statements are finalized.
+Stmt.Exec is renamed to Stmt.Bind, and a new Stmt.Exec method is introduced to bind and step.
+Stmt.Bind uses the native sqlite3_bind_x methods and fails if the type is unsupported.
+Stmt.NamedBind can be used to bind by name.
+Stmt.Next returns a (bool, os.Error) pair, like Reader.Read.
+Stmt.Scan uses the native sqlite3_column_x methods.
+Stmt.NamedScan is added. It's compliant with [go-dbi](https://github.com/thomaslee/go-dbi/).
+Stmt.ScanByIndex/ScanByName are added to test for NULL values.
+
+Currently, the weak point of the binding is the *Scan* methods.
+The original implementation used this strategy:
+ - convert the stored value to a []byte by calling sqlite3_column_blob,
+ - convert the bytes to the desired Go type with correct feedback in case of illegal conversion,
+ - but apparently no support for NULL values.
+
+Using the native sqlite3_column_x methods implies:
+ - optimal conversion from the storage type to the Go type (when they match),
+ - lossy conversion when types mismatch (select cast('M' as int); --> 0),
+ - a NULL value can be returned only for **type destinations; otherwise a default value (0, false, "") is returned.
+
+SQLite logs (SQLITE_CONFIG_LOG) can be activated by:
+- the ConfigLog function
+- or `export SQLITE_LOG=1`
+
+### Similar projects created after Jul 17, 2011:
+
+https://github.com/mattn/go-sqlite3 (Nov 11, 2011)
+https://github.com/mxk/go-sqlite (Feb 12, 2013)
+https://github.com/crawshaw/sqlite (Mar 28, 2018)
+https://github.com/bvinc/go-sqlite-lite (Aug 7, 2018)
+
+### Additions:
+
+Conn.Exists
+Conn.OneValue
+
+Conn.OpenVfs
+Conn.EnableFkey/IsFKeyEnabled
+Conn.Changes/TotalChanges
+Conn.LastInsertRowid
+Conn.Interrupt
+Conn.Begin/BeginTransaction(type)/Commit/Rollback
+Conn.GetAutocommit
+Conn.EnableLoadExtension/LoadExtension
+Conn.IntegrityCheck
+
+Stmt.Insert/ExecDml/Select/SelectOneRow
+Stmt.BindParameterCount/BindParameterIndex(name)/BindParameterName(index)
+Stmt.ClearBindings
+Stmt.ColumnCount/ColumnNames/ColumnIndex(name)/ColumnName(index)/ColumnType(index)
+Stmt.ReadOnly
+Stmt.Busy
+
+Blob:
+ZeroBlobLength
+Conn.NewBlobReader
+Conn.NewBlobReadWriter
+
+Meta:
+Conn.Databases
+Conn.Tables/Views/Indexes
+Conn.Columns
+Conn.ForeignKeys
+Conn.TableIndexes/IndexColumns
+
+Time:
+JulianDay
+JulianDayToUTC
+JulianDayToLocalTime
+UnixTime, JulianTime and TimeStamp are used to persist Go times in formats supported by the SQLite3 date functions.
+
+Trace:
+Conn.BusyHandler
+Conn.Profile
+Conn.ProgressHandler
+Conn.SetAuthorizer
+Conn.Trace
+Stmt.Status
+
+Hook:
+Conn.CommitHook
+Conn.RollbackHook
+Conn.UpdateHook
+
+Function:
+Conn.CreateScalarFunction
+Conn.CreateAggregateFunction
+
+Virtual Table (partial support):
+Conn.CreateModule
+Conn.DeclareVTab
+
+### GC:
+Although Go is garbage-collected, there is no destructor (see http://www.airs.com/blog/archives/362).
+In the gosqlite wrapper, no finalizer is used.
+So users must ensure that C resources (database connections, prepared statements, BLOBs, Backups) are destroyed/deallocated by calling Conn.Close, Stmt.Finalize, BlobReader.Close, Backup.Close.
+
+Therefore, sqlite3_close/sqlite3_next_stmt are used by Conn.Close to free the database connection and all dangling statements (not sqlite3_close_v2) (see http://sqlite.org/c3ref/close.html).
+
+### Benchmarks:
+$ go test -bench . -benchmem
+
+BenchmarkValuesScan	  500000	      6265 ns/op	      74 B/op	       3 allocs/op
+BenchmarkScan	  500000	      4994 ns/op	      41 B/op	       4 allocs/op
+BenchmarkNamedScan	  500000	      4960 ns/op	      93 B/op	       7 allocs/op
+
+BenchmarkInsert	  500000	      4085 ns/op	      16 B/op	       1 allocs/op
+BenchmarkNamedInsert	  500000	      4798 ns/op	      64 B/op	       4 allocs/op
+
+BenchmarkDisabledCache	  100000	     19841 ns/op	     117 B/op	       3 allocs/op
+BenchmarkEnabledCache	 2000000	       790 ns/op	      50 B/op	       1 allocs/op
+
+BenchmarkLike	 1000000	      2605 ns/op	       0 B/op	       0 allocs/op
+BenchmarkHalf	  500000	      4988 ns/op	      33 B/op	       1 allocs/op
+BenchmarkRegexp	  500000	      5557 ns/op	       8 B/op	       1 allocs/op
+
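Before the remaining vendored sources, a minimal sketch of the driver layer in use. It assumes that a blank import of this package registers the "sqlite3" driver, which matches the sql.Register call in the vendored driver.go further down:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/gwenn/gosqlite" // registers the "sqlite3" driver
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err = db.Exec(`CREATE TABLE t (id INTEGER PRIMARY KEY, name TEXT)`); err != nil {
		log.Fatal(err)
	}
	if _, err = db.Exec(`INSERT INTO t (name) VALUES (?)`, "gosqlite"); err != nil {
		log.Fatal(err)
	}

	var name string
	if err = db.QueryRow(`SELECT name FROM t WHERE id = ?`, 1).Scan(&name); err != nil {
		log.Fatal(err)
	}
	fmt.Println(name) // gosqlite
}
```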
diff --git a/vendor/github.com/gwenn/gosqlite/backup.go b/vendor/github.com/gwenn/gosqlite/backup.go new file mode 100644 index 0000000..e86178f --- /dev/null +++ b/vendor/github.com/gwenn/gosqlite/backup.go @@ -0,0 +1,124 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sqlite + +/* +#include +#include +*/ +import "C" + +import ( + "errors" + "time" + "unsafe" +) + +// NewBackup initializes the backup/copy of the content of one database (source) to another (destination). +// The database name is "main", "temp", or the name specified in an ATTACH statement. +// +// (See http://sqlite.org/c3ref/backup_finish.html#sqlite3backupinit) +func NewBackup(dst *Conn, dstName string, src *Conn, srcName string) (*Backup, error) { + if dst == nil || src == nil { + return nil, errors.New("nil sqlite backup source or destination") + } + dname := C.CString(dstName) + sname := C.CString(srcName) + + sb := C.sqlite3_backup_init(dst.db, dname, src.db, sname) + C.free(unsafe.Pointer(sname)) + C.free(unsafe.Pointer(dname)) + if sb == nil { + return nil, dst.error(C.sqlite3_errcode(dst.db), "backup init failed") + } + return &Backup{sb, dst, src}, nil +} + +// The Backup object records state information about an ongoing online backup operation. +// (See http://sqlite.org/c3ref/backup.html) +type Backup struct { + sb *C.sqlite3_backup + dst, src *Conn +} + +// Step copies up to N pages between the source and destination databases. +// (See http://sqlite.org/c3ref/backup_finish.html#sqlite3backupstep) +func (b *Backup) Step(npage int32) error { + if b == nil { + return errors.New("nil sqlite backup") + } + rv := C.sqlite3_backup_step(b.sb, C.int(npage)) + if rv == C.SQLITE_OK || Errno(rv&0xFF) == ErrBusy || Errno(rv&0xFF) == ErrLocked { // TODO Trace busy/locked errors + return nil + } else if rv == C.SQLITE_DONE { + return Errno(rv) + } + return b.dst.error(rv, "backup step failed") +} + +// BackupStatus reports backup progression +type BackupStatus struct { + Remaining int + PageCount int +} + +// Status returns the number of pages still to be backed up and the total number of pages in the source database file. +// (See http://sqlite.org/c3ref/backup_finish.html#sqlite3backupremaining) +func (b *Backup) Status() BackupStatus { + return BackupStatus{int(C.sqlite3_backup_remaining(b.sb)), int(C.sqlite3_backup_pagecount(b.sb))} +} + +// Run starts the backup: +// - copying up to 'npage' pages between the source and destination at each step, +// - sleeping 'sleepNs' between steps, +// - notifying the caller of backup progress throw the channel 'c', +// - closing the backup when done or when an error happens. +// Sleeping is disabled if 'sleepNs' is zero or negative. +// Notification is disabled if 'c' is null. +// (See http://sqlite.org/c3ref/backup_finish.html#sqlite3backupstep, sqlite3_backup_remaining and sqlite3_backup_pagecount) +func (b *Backup) Run(npage int32, sleepNs time.Duration, c chan<- BackupStatus) error { + var err error + for { + err = b.Step(npage) + if err != nil { + break + } + if c != nil { + c <- b.Status() + } + if sleepNs > 0 { + time.Sleep(sleepNs) + } + } + if err != Done { + b.Close() + } else { + if c != nil { + c <- b.Status() + } + err = b.Close() + } + if err != nil && err != Done { + return err + } + return nil +} + +// Close finishes/stops the backup. 
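A hedged sketch of driving the backup API above with Run; calling sqlite.Open with default flags and the file names are assumptions for illustration:

```go
package main

import (
	"log"
	"time"

	sqlite "github.com/gwenn/gosqlite"
)

func main() {
	// Open source and destination connections (file names are made up).
	src, err := sqlite.Open("source.db")
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	dst, err := sqlite.Open("copy.db")
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()

	bk, err := sqlite.NewBackup(dst, "main", src, "main")
	if err != nil {
		log.Fatal(err)
	}
	// Copy 64 pages per step, sleeping briefly between steps, with no
	// progress notifications; Run closes the backup itself when done.
	if err = bk.Run(64, 10*time.Millisecond, nil); err != nil {
		log.Fatal(err)
	}
}
```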
+// (See http://sqlite.org/c3ref/backup_finish.html#sqlite3backupfinish) +func (b *Backup) Close() error { + if b == nil { + return errors.New("nil sqlite backup") + } + if b.sb == nil { + return nil + } + rv := C.sqlite3_backup_finish(b.sb) // must be called only once + b.sb = nil + if rv != C.SQLITE_OK { + return b.dst.error(rv, "backup finish failed") + } + return nil +} diff --git a/vendor/github.com/gwenn/gosqlite/blob.go b/vendor/github.com/gwenn/gosqlite/blob.go new file mode 100644 index 0000000..122c3ab --- /dev/null +++ b/vendor/github.com/gwenn/gosqlite/blob.go @@ -0,0 +1,205 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sqlite + +/* +#include +#include +*/ +import "C" + +import ( + "errors" + "fmt" + "io" + "unsafe" +) + +// BlobReader is an io.ReadCloser adapter for BLOB +// (See http://sqlite.org/c3ref/blob.html) +type BlobReader struct { + c *Conn + bl *C.sqlite3_blob + size int32 + offset int32 +} + +// BlobReadWriter is an io.ReadWriteCloser adapter for BLOB +type BlobReadWriter struct { + BlobReader +} + +// ZeroBlobLength is used to reserve space for a BLOB that is later written. +// stmt.Bind(..., ZeroBlobLength(1000), ...) +// (See http://sqlite.org/lang_corefunc.html#zeroblob) +type ZeroBlobLength int32 + +// NewBlobReader opens a BLOB for incremental I/O in read-only mode. +// +// (See http://sqlite.org/c3ref/blob_open.html) +func (c *Conn) NewBlobReader(db, table, column string, row int64) (*BlobReader, error) { + bl, err := c.blobOpen(db, table, column, row, false) + if err != nil { + return nil, err + } + return &BlobReader{c, bl, -1, 0}, nil +} + +// NewBlobReadWriter opens a BLOB for incremental I/O. +// (See http://sqlite.org/c3ref/blob_open.html) +func (c *Conn) NewBlobReadWriter(db, table, column string, row int64) (*BlobReadWriter, error) { + bl, err := c.blobOpen(db, table, column, row, true) + if err != nil { + return nil, err + } + return &BlobReadWriter{BlobReader{c, bl, -1, 0}}, nil +} + +func (c *Conn) blobOpen(db, table, column string, row int64, write bool) (*C.sqlite3_blob, error) { + zDb := C.CString(db) + zTable := C.CString(table) + zColumn := C.CString(column) + var bl *C.sqlite3_blob + rv := C.sqlite3_blob_open(c.db, zDb, zTable, zColumn, C.sqlite3_int64(row), btocint(write), &bl) + C.free(unsafe.Pointer(zColumn)) + C.free(unsafe.Pointer(zTable)) + C.free(unsafe.Pointer(zDb)) + if rv != C.SQLITE_OK { + if bl != nil { + C.sqlite3_blob_close(bl) + } + return nil, c.error(rv, fmt.Sprintf("Conn.blobOpen(db: %q, tbl: %q, col: %q, row: %d)", db, table, column, row)) + } + if bl == nil { + return nil, errors.New("sqlite succeeded without returning a blob") + } + return bl, nil +} + +// Close closes a BLOB handle. +// (See http://sqlite.org/c3ref/blob_close.html) +func (r *BlobReader) Close() error { + if r == nil { + return errors.New("nil sqlite blob") + } + if r.bl == nil { + return nil + } + rv := C.sqlite3_blob_close(r.bl) // must be called only once + r.bl = nil + if rv != C.SQLITE_OK { + return r.c.error(rv, "BlobReader.Close") + } + return nil +} + +// Read reads data from a BLOB incrementally. 
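A sketch of incremental BLOB I/O with the handles above: reserve space with zeroblob(), write through a BlobReadWriter, then read the value back. Conn.Exec and Conn.LastInsertRowid are taken from the README additions; the table and column names are illustrative:

```go
package main

import (
	"io/ioutil"
	"log"

	sqlite "github.com/gwenn/gosqlite"
)

func main() {
	c, err := sqlite.Open(":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	check := func(err error) {
		if err != nil {
			log.Fatal(err)
		}
	}
	check(c.Exec("CREATE TABLE t (data BLOB)"))
	// Reserve 5 zero bytes that the BlobReadWriter will overwrite.
	check(c.Exec("INSERT INTO t (data) VALUES (zeroblob(5))"))
	rowid := c.LastInsertRowid()

	w, err := c.NewBlobReadWriter("main", "t", "data", rowid)
	check(err)
	_, err = w.Write([]byte("hello"))
	check(err)
	check(w.Close())

	r, err := c.NewBlobReader("main", "t", "data", rowid)
	check(err)
	defer r.Close()
	b, err := ioutil.ReadAll(r) // BlobReader is an io.ReadCloser
	check(err)
	log.Printf("%s", b) // hello
}
```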
+// (See http://sqlite.org/c3ref/blob_read.html) +func (r *BlobReader) Read(v []byte) (int, error) { + if len(v) == 0 { + return 0, nil + } + size, err := r.Size() + if err != nil { + return 0, err + } + if r.offset >= size { + return 0, io.EOF + } + n := size - r.offset + if len(v) < int(n) { + n = int32(len(v)) + } + p := &v[0] + rv := C.sqlite3_blob_read(r.bl, unsafe.Pointer(p), C.int(n), C.int(r.offset)) + if rv != C.SQLITE_OK { + return 0, r.c.error(rv, "BlobReader.Read") + } + r.offset += n + return int(n), nil +} + +// Seek sets the offset for the next Read or Write to offset. +// Tell is possible with Seek(0, os.SEEK_CUR). +// SQLite is limited to 32-bits offset. +func (r *BlobReader) Seek(offset int64, whence int) (int64, error) { + size, err := r.Size() + if err != nil { + return 0, err + } + switch whence { + case 0: // SEEK_SET + if offset < 0 || offset > int64(size) { + return 0, r.c.specificError("invalid offset: %d", offset) + } + r.offset = int32(offset) + case 1: // SEEK_CUR + if (int64(r.offset)+offset) < 0 || (int64(r.offset)+offset) > int64(size) { + return 0, r.c.specificError("invalid offset: %d", offset) + } + r.offset += int32(offset) + case 2: // SEEK_END + if (int64(size)+offset) < 0 || offset > 0 { + return 0, r.c.specificError("invalid offset: %d", offset) + } + r.offset = size + int32(offset) + default: + return 0, r.c.specificError("bad seekMode: %d", whence) + } + return int64(r.offset), nil +} + +// Size returns the size of an opened BLOB. +// (See http://sqlite.org/c3ref/blob_bytes.html) +func (r *BlobReader) Size() (int32, error) { + if r.bl == nil { + return 0, errors.New("blob already closed") + } + if r.size < 0 { + r.size = int32(C.sqlite3_blob_bytes(r.bl)) + } + return r.size, nil +} + +// Write writes data into a BLOB incrementally. +// (See http://sqlite.org/c3ref/blob_write.html) +func (w *BlobReadWriter) Write(v []byte) (int, error) { + if len(v) == 0 { + return 0, nil + } + size, err := w.Size() + if err != nil { + return 0, err + } + if w.offset >= size { + return 0, io.EOF + } + /* Write must return a non-nil error if it returns n < len(v) */ + n := size - w.offset + if len(v) <= int(n) { + n = int32(len(v)) + } else { + err = io.EOF + } + p := &v[0] + rv := C.sqlite3_blob_write(w.bl, unsafe.Pointer(p), C.int(n), C.int(w.offset)) + if rv != C.SQLITE_OK { + return 0, w.c.error(rv, "BlobReadWiter.Write") + } + w.offset += n + return int(n), err +} + +// Reopen moves a BLOB handle to a new row. +// (See http://sqlite.org/c3ref/blob_reopen.html) +func (r *BlobReader) Reopen(rowid int64) error { + rv := C.sqlite3_blob_reopen(r.bl, C.sqlite3_int64(rowid)) + if rv != C.SQLITE_OK { + return r.c.error(rv, fmt.Sprintf("BlobReader.Reopen(%d)", rowid)) + } + r.size = -1 + r.offset = 0 + return nil +} diff --git a/vendor/github.com/gwenn/gosqlite/cache.go b/vendor/github.com/gwenn/gosqlite/cache.go new file mode 100644 index 0000000..ad07cec --- /dev/null +++ b/vendor/github.com/gwenn/gosqlite/cache.go @@ -0,0 +1,107 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package sqlite + +import ( + "container/list" + "sync" +) + +const ( + defaultCacheSize = 10 +) + +// Like http://www.sqlite.org/tclsqlite.html#cache +type cache struct { + m sync.Mutex + l *list.List + maxSize int // Cache turned off when maxSize <= 0 +} + +func newCache() *cache { + return newCacheSize(defaultCacheSize) +} +func newCacheSize(maxSize int) *cache { + if maxSize <= 0 { + return &cache{maxSize: maxSize} + } + return &cache{l: list.New(), maxSize: maxSize} +} + +// To be called in Conn#Prepare +func (c *cache) find(sql string) *Stmt { + if c.maxSize <= 0 { + return nil + } + c.m.Lock() + defer c.m.Unlock() + for e := c.l.Front(); e != nil; e = e.Next() { + s := e.Value.(*Stmt) + if s.SQL() == sql { // TODO s.SQL() may have been trimmed by SQLite + c.l.Remove(e) + return s + } + } + return nil +} + +// To be called in Stmt#Finalize +func (c *cache) release(s *Stmt) error { + if c.maxSize <= 0 || len(s.tail) > 0 || s.Busy() { + return s.finalize() + } + if err := s.Reset(); err != nil { + s.finalize() + return err + } + if err := s.ClearBindings(); err != nil { + s.finalize() + return nil + } + c.m.Lock() + defer c.m.Unlock() + c.l.PushFront(s) + for c.l.Len() > c.maxSize { + c.l.Remove(c.l.Back()).(*Stmt).finalize() + } + return nil +} + +// Finalize and free the cached prepared statements +// To be called in Conn#Close +func (c *cache) flush() { + if c.maxSize <= 0 { + return + } + c.m.Lock() + defer c.m.Unlock() + var e, next *list.Element + for e = c.l.Front(); e != nil; e = next { + next = e.Next() + c.l.Remove(e).(*Stmt).finalize() + } +} + +// CacheSize returns (current, max) sizes. +// Prepared statements cache is turned off when max size is 0 +func (c *Conn) CacheSize() (current int, max int) { + if c.stmtCache.maxSize <= 0 { + return 0, 0 + } + return c.stmtCache.l.Len(), c.stmtCache.maxSize +} + +// SetCacheSize sets the size of prepared statements cache. +// Cache is turned off (and flushed) when size <= 0 +func (c *Conn) SetCacheSize(size int) { + stmtCache := c.stmtCache + if stmtCache.l == nil && size > 0 { + stmtCache.l = list.New() + } + if size <= 0 { + stmtCache.flush() + } + stmtCache.maxSize = size +} diff --git a/vendor/github.com/gwenn/gosqlite/config.c b/vendor/github.com/gwenn/gosqlite/config.c new file mode 100644 index 0000000..4ffdb45 --- /dev/null +++ b/vendor/github.com/gwenn/gosqlite/config.c @@ -0,0 +1,14 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include +//#include "_cgo_export.h" + +int goSqlite3ConfigThreadMode(int mode) { + return sqlite3_config(mode); +} + +int goSqlite3Config(int op, int mode) { + return sqlite3_config(op, mode); +} diff --git a/vendor/github.com/gwenn/gosqlite/config.go b/vendor/github.com/gwenn/gosqlite/config.go new file mode 100644 index 0000000..972ecb4 --- /dev/null +++ b/vendor/github.com/gwenn/gosqlite/config.go @@ -0,0 +1,129 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package sqlite + +/* +#include +#include + +// cgo doesn't support varargs +static inline int my_db_config(sqlite3 *db, int op, int v, int *ok) { + return sqlite3_db_config(db, op, v, ok); +} + +int goSqlite3ConfigThreadMode(int mode); +int goSqlite3Config(int op, int mode); +*/ +import "C" + +import "unsafe" + +// ThreadingMode enumerates SQLite threading mode +// See ConfigThreadingMode +type ThreadingMode int32 + +// SQLite threading modes +const ( + SingleThread ThreadingMode = C.SQLITE_CONFIG_SINGLETHREAD + MultiThread ThreadingMode = C.SQLITE_CONFIG_MULTITHREAD + Serialized ThreadingMode = C.SQLITE_CONFIG_SERIALIZED +) + +// ConfigThreadingMode alters threading mode. +// (See sqlite3_config(SQLITE_CONFIG_SINGLETHREAD|SQLITE_CONFIG_MULTITHREAD|SQLITE_CONFIG_SERIALIZED): http://sqlite.org/c3ref/config.html) +func ConfigThreadingMode(mode ThreadingMode) error { + rv := C.goSqlite3ConfigThreadMode(C.int(mode)) + if rv == C.SQLITE_OK { + return nil + } + return Errno(rv) +} + +// ConfigMemStatus enables or disables the collection of memory allocation statistics. +// (See sqlite3_config(SQLITE_CONFIG_MEMSTATUS): http://sqlite.org/c3ref/config.html) +func ConfigMemStatus(b bool) error { + rv := C.goSqlite3Config(C.SQLITE_CONFIG_MEMSTATUS, btocint(b)) + if rv == C.SQLITE_OK { + return nil + } + return Errno(rv) +} + +// ConfigURI enables or disables URI handling. +// (See sqlite3_config(SQLITE_CONFIG_URI): http://sqlite.org/c3ref/config.html) +func ConfigURI(b bool) error { + rv := C.goSqlite3Config(C.SQLITE_CONFIG_URI, btocint(b)) + if rv == C.SQLITE_OK { + return nil + } + return Errno(rv) +} + +// EnableSharedCache enables or disables shared pager cache +// (See http://sqlite.org/c3ref/enable_shared_cache.html) +func EnableSharedCache(b bool) error { + rv := C.sqlite3_enable_shared_cache(btocint(b)) + if rv == C.SQLITE_OK { + return nil + } + return Errno(rv) +} + +// EnableFKey enables or disables the enforcement of foreign key constraints. +// Calls sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_FKEY, b). +// Another way is PRAGMA foreign_keys = boolean; +// +// (See http://sqlite.org/c3ref/c_dbconfig_enable_fkey.html) +func (c *Conn) EnableFKey(b bool) (bool, error) { + return c.queryOrSetEnableDbConfig(C.SQLITE_DBCONFIG_ENABLE_FKEY, btocint(b)) +} + +// IsFKeyEnabled reports if the enforcement of foreign key constraints is enabled or not. +// Calls sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_FKEY, -1). +// Another way is PRAGMA foreign_keys; +// +// (See http://sqlite.org/c3ref/c_dbconfig_enable_fkey.html) +func (c *Conn) IsFKeyEnabled() (bool, error) { + return c.queryOrSetEnableDbConfig(C.SQLITE_DBCONFIG_ENABLE_FKEY, -1) +} + +// EnableTriggers enables or disables triggers. +// Calls sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_TRIGGER, b). +// +// (See http://sqlite.org/c3ref/c_dbconfig_enable_fkey.html) +func (c *Conn) EnableTriggers(b bool) (bool, error) { + return c.queryOrSetEnableDbConfig(C.SQLITE_DBCONFIG_ENABLE_TRIGGER, btocint(b)) +} + +// AreTriggersEnabled checks if triggers are enabled. 
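A small sketch of the per-connection toggles above: enable foreign-key enforcement and read the setting back (sqlite.Open with default flags is assumed):

```go
package main

import (
	"fmt"
	"log"

	sqlite "github.com/gwenn/gosqlite"
)

func main() {
	c, err := sqlite.Open(":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// EnableFKey wraps sqlite3_db_config(SQLITE_DBCONFIG_ENABLE_FKEY, 1).
	if _, err = c.EnableFKey(true); err != nil {
		log.Fatal(err)
	}
	on, err := c.IsFKeyEnabled()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("foreign keys enabled:", on) // true
}
```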
+// Calls sqlite3_db_config(db, SQLITE_DBCONFIG_ENABLE_TRIGGER, -1) +// +// (See http://sqlite.org/c3ref/c_dbconfig_enable_fkey.html) +func (c *Conn) AreTriggersEnabled() (bool, error) { + return c.queryOrSetEnableDbConfig(C.SQLITE_DBCONFIG_ENABLE_TRIGGER, -1) +} + +func (c *Conn) queryOrSetEnableDbConfig(key, i C.int) (bool, error) { + var ok C.int + rv := C.my_db_config(c.db, key, i, &ok) + if rv == C.SQLITE_OK { + return (ok == 1), nil + } + return false, c.error(rv) +} + +// EnableExtendedResultCodes enables or disables the extended result codes feature of SQLite. +// (See http://sqlite.org/c3ref/extended_result_codes.html) +func (c *Conn) EnableExtendedResultCodes(b bool) error { + return c.error(C.sqlite3_extended_result_codes(c.db, btocint(b)), "Conn.EnableExtendedResultCodes") +} + +// CompileOptionUsed returns false or true indicating whether the specified option was defined at compile time. +// (See http://sqlite.org/c3ref/compileoption_get.html) +func CompileOptionUsed(optName string) bool { + cOptName := C.CString(optName) + defer C.free(unsafe.Pointer(cOptName)) + return C.sqlite3_compileoption_used(cOptName) == 1 +} diff --git a/vendor/github.com/gwenn/gosqlite/config_extra.go b/vendor/github.com/gwenn/gosqlite/config_extra.go new file mode 100644 index 0000000..f05a17d --- /dev/null +++ b/vendor/github.com/gwenn/gosqlite/config_extra.go @@ -0,0 +1,65 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build all +// See SQLITE_OMIT_LOAD_EXTENSION (http://www.sqlite.org/compile.html) + +package sqlite + +/* +#include +#include + +static int goSqlite3ConfigMMapSize(sqlite3_int64 defaultSize, sqlite3_int64 maxSize) { +#if SQLITE_VERSION_NUMBER < 3007017 + return -1; +#else + return sqlite3_config(SQLITE_CONFIG_MMAP_SIZE, defaultSize, maxSize); +#endif +} +*/ +import "C" + +import ( + "unsafe" +) + +// EnableLoadExtension enables or disables extension loading. +// (See http://sqlite.org/c3ref/enable_load_extension.html) +func (c *Conn) EnableLoadExtension(b bool) error { + rv := C.sqlite3_enable_load_extension(c.db, btocint(b)) + if rv == C.SQLITE_OK { + return nil + } + return c.error(rv, "Conn.EnableLoadExtension") +} + +// LoadExtension loads an extension +// (See http://sqlite.org/c3ref/load_extension.html) +func (c *Conn) LoadExtension(file string, proc ...string) error { + cfile := C.CString(file) + defer C.free(unsafe.Pointer(cfile)) + var cproc *C.char + if len(proc) > 0 { + cproc = C.CString(proc[0]) + defer C.free(unsafe.Pointer(cproc)) + } + var errMsg *C.char + rv := C.sqlite3_load_extension(c.db, cfile, cproc, &errMsg) + if rv != C.SQLITE_OK { + defer C.sqlite3_free(unsafe.Pointer(errMsg)) + return c.error(rv, C.GoString(errMsg)) + } + return nil +} + +// ConfigMMapSize decreases or increases the default mmap_size/reduces the hard upper bound at start time. +// (See http://www.sqlite.org/c3ref/c_config_covering_index_scan.html#sqliteconfigmmapsize) +func ConfigMMapSize(defaultSize, maxSize int64) error { + rv := C.goSqlite3ConfigMMapSize(C.sqlite3_int64(defaultSize), C.sqlite3_int64(maxSize)) + if rv == C.SQLITE_OK { + return nil + } + return Errno(rv) +} diff --git a/vendor/github.com/gwenn/gosqlite/csv.go b/vendor/github.com/gwenn/gosqlite/csv.go new file mode 100644 index 0000000..edc64a4 --- /dev/null +++ b/vendor/github.com/gwenn/gosqlite/csv.go @@ -0,0 +1,493 @@ +// Copyright 2010 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sqlite + +import ( + "bufio" + "errors" + "fmt" + "io" + "os" + "strconv" + "strings" + + "github.com/gwenn/yacr" +) + +type csvModule struct { +} + +// args[0] => module name +// args[1] => db name +// args[2] => table name +// args[3] => filename (maybe quoted: '...') +// args[i>3] : +// - contains HEADER ignoring case => use first line in file as column names or skip first line if NAMES are specified +// - contains NO_QUOTE ignoring case => no double quoted field expected in file +// - single char (;) or quoted char (';') => values separator in file +// - contains NAMES ignoring case => use args[i+1], ... as column names (until _TYPES_) +// - contains TYPES ignoring case => use args[I+1], ... as column types +// Beware, empty args are skipped (..., ,...), use '' empty SQL string instead (..., '', ...). +// Adapted from: +// - https://github.com/gwenn/sqlite-csv-ext +// - http://www.ch-werner.de/sqliteodbc/html/csvtable_8c.html +func (m csvModule) Create(c *Conn, args []string) (VTab, error) { + if len(args) < 4 { + return nil, errors.New("no CSV file specified") + } + /* pull out name of csv file (remove quotes) */ + filename := args[3] + if filename[0] == '\'' { + filename = filename[1 : len(filename)-1] + } + /* if a custom delimiter specified, pull it out */ + var separator byte = ',' + /* should the header zRow be used */ + header := false + quoted := true + guess := true + var cols, types []string + for i := 4; i < len(args); i++ { + arg := args[i] + switch { + case types != nil: + if arg[0] == '\'' { + arg = arg[1 : len(arg)-1] + } + types = append(types, arg) + case cols != nil: + if strings.ToUpper(arg) == "_TYPES_" { + types = make([]string, 0, len(cols)) + } else { + cols = append(cols, arg) + } + case len(arg) == 1: + separator = arg[0] + guess = false + case len(arg) == 3 && arg[0] == '\'': + separator = arg[1] + guess = false + case strings.Contains(strings.ToUpper(arg), "HEADER"): + header = true + case strings.Contains(strings.ToUpper(arg), "NO_QUOTE"): + quoted = false + case strings.Contains(strings.ToUpper(arg), "NAMES"): + cols = make([]string, 0, 10) + case strings.Contains(strings.ToUpper(arg), "TYPES"): + types = make([]string, 0, 10) + } + } + /* open the source csv file */ + file, err := os.Open(filename) + if err != nil { + return nil, fmt.Errorf("error opening CSV file: '%s'", filename) + } + defer file.Close() + /* Read first zRow to obtain column names/number */ + vTab := &csvTab{f: filename, sep: separator, quoted: quoted, cols: make([]string, 0, 10)} + vTab.maxLength = int(c.Limit(LimitLength)) + vTab.maxColumn = int(c.Limit(LimitColumn)) + + reader := yacr.NewReader(file, separator, quoted, guess) + if header { + reader.Split(vTab.split(reader.ScanField)) + } + if err = vTab.readRow(reader); err != nil { + return nil, err + } + named := header + if len(cols) > 0 { // headers ignored + // TODO check len(cols) == len(vTab.cols) ? + vTab.cols = cols + named = true + } + if len(vTab.cols) == 0 { + if len(types) == 0 { + return nil, errors.New("no column name/type specified") + } + vTab.cols = types + } + + if guess { + vTab.sep = reader.Sep() + } + /* Create the underlying relational database schema. If + * that is successful, call sqlite3_declare_vtab() to configure + * the csv table schema. 
+ */ + sql := "CREATE TABLE x(" + tail := ", " + for i, col := range vTab.cols { + if i == len(vTab.cols)-1 { + tail = ");" + } + colType := "" + if len(types) > i { + colType = " " + types[i] + } + if named { + if len(col) == 0 { + return nil, errors.New("no column name found") + } + sql = fmt.Sprintf("%s\"%s\"%s%s", sql, col, colType, tail) + } else { + sql = fmt.Sprintf("%scol%d%s%s", sql, i+1, colType, tail) + } + } + if err = c.DeclareVTab(sql); err != nil { + return nil, err + } + + vTab.affinities = make([]Affinity, len(vTab.cols)) + if len(types) > 0 { + for i, typ := range types { + if i >= len(vTab.affinities) { + break + } + vTab.affinities[i] = typeAffinity(typ) + } + } + return vTab, nil +} +func (m csvModule) Connect(c *Conn, args []string) (VTab, error) { + return m.Create(c, args) +} + +func (m csvModule) DestroyModule() { // nothing to do +} + +type csvTab struct { + f string + sep byte + quoted bool + eof bool + offsetFirstRow int64 + cols []string + affinities []Affinity + + maxLength int + maxColumn int +} + +func (v *csvTab) split(original bufio.SplitFunc) bufio.SplitFunc { + return func(data []byte, atEOF bool) (advance int, token []byte, err error) { + advance, token, err = original(data, atEOF) + v.offsetFirstRow += int64(advance) + return + } +} + +func (v *csvTab) readRow(r *yacr.Reader) error { + v.cols = v.cols[:0] + for { + if !r.Scan() { + err := r.Err() + v.eof = err == nil + return err + } + if r.EndOfRecord() && len(r.Bytes()) == 0 { // skip empty line (or line comment) + continue + } + col := r.Text() + if len(col) >= v.maxLength { + return fmt.Errorf("CSV row is too long (>= %d)", v.maxLength) + } + v.cols = append(v.cols, col) + if len(v.cols) >= v.maxColumn { + return fmt.Errorf("too many columns (>= %d)", v.maxColumn) + } + if r.EndOfRecord() { + break + } + } + return nil +} + +func (v *csvTab) BestIndex() error { + return nil +} +func (v *csvTab) Disconnect() error { + return nil +} +func (v *csvTab) Destroy() error { + return nil +} +func (v *csvTab) Open() (VTabCursor, error) { + f, err := os.Open(v.f) + if err != nil { + return nil, err + } + return &csvTabCursor{vTab: v, f: f, rowNumber: 0}, nil +} + +type csvTabCursor struct { + vTab *csvTab + f *os.File + r *yacr.Reader + rowNumber int64 +} + +func (vc *csvTabCursor) Close() error { + return vc.f.Close() +} +func (vc *csvTabCursor) Filter() error { + v := vc.vTab + /* seek back to start of first zRow */ + v.eof = false + if _, err := vc.f.Seek(v.offsetFirstRow, os.SEEK_SET); err != nil { + return err + } + vc.rowNumber = 0 + /* a new reader/scanner must be created because there is no way to reset its internal buffer/state (which has been invalidated by the SEEK_SET)*/ + vc.r = yacr.NewReader(vc.f, v.sep, v.quoted, false) + /* read and parse next line */ + return vc.Next() +} +func (vc *csvTabCursor) Next() error { + v := vc.vTab + if v.eof { + return io.EOF + } + if vc.r == nil { + vc.r = yacr.NewReader(vc.f, v.sep, v.quoted, false) + } + /* read the next row of data */ + err := v.readRow(vc.r) + if err == nil { + vc.rowNumber++ + } + return err +} +func (vc *csvTabCursor) EOF() bool { + return vc.vTab.eof +} +func (vc *csvTabCursor) Column(c *Context, col int) error { + cols := vc.vTab.cols + if col < 0 || col >= len(cols) { + return fmt.Errorf("column index out of bounds: %d", col) + } + if cols == nil { + c.ResultNull() + return nil + } + affinity := vc.vTab.affinities[col] + if affinity == Integral || affinity == Numerical { + if i, err := strconv.ParseInt(cols[col], 10, 64); err == nil { + 
c.ResultInt64(i) + return nil + } + } + if affinity == Real || affinity == Numerical { + if f, err := strconv.ParseFloat(cols[col], 64); err == nil { + c.ResultDouble(f) + return nil + } + } + c.ResultText(cols[col]) + return nil +} +func (vc *csvTabCursor) Rowid() (int64, error) { + return vc.rowNumber, nil +} + +// LoadCsvModule loads CSV virtual table module. +// CREATE VIRTUAL TABLE vtab USING csv('test.csv', USE_HEADER_ROW, NO_QUOTE) +func LoadCsvModule(db *Conn) error { + return db.CreateModule("csv", csvModule{}) +} + +// ExportTableToCSV exports table or view content to CSV. +// 'headers' flag turns output of headers on or off. +// NULL values are output as specified by 'nullvalue' parameter. +func (db *Conn) ExportTableToCSV(dbName, table string, nullvalue string, headers bool, w *yacr.Writer) error { + var sql string + if len(dbName) == 0 { + sql = fmt.Sprintf(`SELECT * FROM "%s"`, escapeQuote(table)) + } else { + sql = fmt.Sprintf(`SELECT * FROM %s."%s"`, doubleQuote(dbName), escapeQuote(table)) + } + s, err := db.prepare(sql) + if err != nil { + return err + } + defer s.finalize() + return s.ExportToCSV(nullvalue, headers, w) +} + +// ExportToCSV exports statement result to CSV. +// 'headers' flag turns output of headers on or off. +// NULL values are output as specified by 'nullvalue' parameter. +func (s *Stmt) ExportToCSV(nullvalue string, headers bool, w *yacr.Writer) error { + if headers { + for _, header := range s.ColumnNames() { + w.Write([]byte(header)) + } + w.EndOfRecord() + if err := w.Err(); err != nil { + return err + } + } + s.Select(func(s *Stmt) error { + for i := 0; i < s.ColumnCount(); i++ { + rb, null := s.ScanRawBytes(i) + if null { + w.Write([]byte(nullvalue)) + } else { + w.Write(rb) + } + } + w.EndOfRecord() + return w.Err() + }) + w.Flush() + return w.Err() +} + +// ImportConfig gathers import parameters. +type ImportConfig struct { + Name string // the name of the input; used only for error reports + Separator byte // CSV separator + Quoted bool // CSV fields are quoted or not + Guess bool // guess separator + Trim bool // optional, trim spaces + Comment byte // optinal, comment marker + Headers bool // skip headers (first line) + Types []Affinity // optional, when target table does not exist, specify columns type + Log io.Writer // optional, used to trace lines in error +} + +func (ic ImportConfig) getType(i int) string { + if i >= len(ic.Types) || ic.Types[i] == Textual { + return "TEXT" + } + if ic.Types[i] == Integral { + return "INT" + } + if ic.Types[i] == Real { + return "REAL" + } + if ic.Types[i] == Numerical { + return "NUMERIC" + } + return "" +} + +// ImportCSV imports CSV data into the specified table (which may not exist yet). +// Code is adapted from .import command implementation in SQLite3 shell sources. +func (db *Conn) ImportCSV(in io.Reader, ic ImportConfig, dbName, table string) error { + columns, err := db.Columns(dbName, table) + if err != nil { + return err + } + r := yacr.NewReader(in, ic.Separator, ic.Quoted, ic.Guess) + r.Trim = ic.Trim + r.Comment = ic.Comment + nCol := len(columns) + if nCol == 0 { // table does not exist, let's create it + var sql string + if len(dbName) == 0 { + sql = fmt.Sprintf(`CREATE TABLE "%s" `, escapeQuote(table)) + } else { + sql = fmt.Sprintf(`CREATE TABLE %s."%s" `, doubleQuote(dbName), escapeQuote(table)) + } + sep := '(' + // TODO if headers flag is false... 
+ for i := 0; r.Scan(); i++ { + if i == 0 && r.EndOfRecord() && len(r.Bytes()) == 0 { // empty line + i = -1 + continue + } + sql += fmt.Sprintf("%c\n \"%s\" %s", sep, r.Text(), ic.getType(i)) + sep = ',' + nCol++ + if r.EndOfRecord() { + break + } + } + if err = r.Err(); err != nil { + return err + } + if sep == '(' { + return errors.New("empty file/input") + } + sql += "\n)" + if err = db.FastExec(sql); err != nil { + return err + } + } else if ic.Headers { // skip headers line + for r.Scan() { + if r.EndOfRecord() { + break + } + } + if err = r.Err(); err != nil { + return err + } + } + + var sql string + if len(dbName) == 0 { + sql = fmt.Sprintf(`INSERT INTO "%s" VALUES (?%s)`, escapeQuote(table), strings.Repeat(", ?", nCol-1)) + } else { + sql = fmt.Sprintf(`INSERT INTO %s."%s" VALUES (?%s)`, doubleQuote(dbName), escapeQuote(table), strings.Repeat(", ?", nCol-1)) + } + s, err := db.prepare(sql) + if err != nil { + return err + } + defer s.Finalize() + ac := db.GetAutocommit() + if ac { + if err = db.Begin(); err != nil { + return err + } + } + defer func() { + if err != nil && ac { + _ = db.Rollback() + } + }() + startLine := r.LineNumber() + for i := 1; r.Scan(); i++ { + if i == 1 && r.EndOfRecord() && len(r.Bytes()) == 0 { // empty line + i = 0 + startLine = r.LineNumber() + continue + } + if i <= nCol { + if err = s.BindByIndex(i, r.Text()); err != nil { + return err + } + } + if r.EndOfRecord() { + if i < nCol { + if ic.Log != nil { + fmt.Fprintf(ic.Log, "%s:%d: expected %d columns but found %d - filling the rest with NULL\n", ic.Name, startLine, nCol, i) + } + for ; i <= nCol; i++ { + if err = s.BindByIndex(i, nil); err != nil { + return err + } + } + } else if i > nCol && ic.Log != nil { + fmt.Fprintf(ic.Log, "%s:%d: expected %d columns but found %d - extras ignored\n", ic.Name, startLine, nCol, i) + } + if _, err = s.Next(); err != nil { + return err + } + i = 0 + startLine = r.LineNumber() + } + } + if err = r.Err(); err != nil { + return err + } + if ac { + if err = db.Commit(); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/gwenn/gosqlite/date.go b/vendor/github.com/gwenn/gosqlite/date.go new file mode 100644 index 0000000..6d8fa14 --- /dev/null +++ b/vendor/github.com/gwenn/gosqlite/date.go @@ -0,0 +1,142 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sqlite + +import ( + "bytes" + "database/sql/driver" + "fmt" + "time" +) + +const ( + julianDay = 2440587.5 // 1970-01-01 00:00:00 is JD 2440587.5 + dayInSeconds = 60 * 60 * 24 +) + +// JulianDayToUTC transforms a julian day number into an UTC Time. +func JulianDayToUTC(jd float64) time.Time { + jd -= julianDay + jd *= dayInSeconds + return time.Unix(int64(jd), 0).UTC() +} + +// JulianDayToLocalTime transforms a julian day number into a local Time. +func JulianDayToLocalTime(jd float64) time.Time { + jd -= julianDay + jd *= dayInSeconds + return time.Unix(int64(jd), 0) +} + +// JulianDay converts a Time into a julian day number. +func JulianDay(t time.Time) float64 { + ns := float64(t.Unix()) + if ns >= 0 { + ns += 0.5 + } + return ns/dayInSeconds + julianDay +} + +// UnixTime is an alias used to persist time as int64 (max precision is 1s and timezone is lost) +type UnixTime struct { + time.Time +} + +// Scan implements the database/sql/Scanner interface. 
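A round-trip sketch for the Julian-day helpers above; precision is one second, since the conversions go through Unix seconds:

```go
package main

import (
	"fmt"
	"time"

	sqlite "github.com/gwenn/gosqlite"
)

func main() {
	t := time.Date(2020, time.July, 1, 12, 0, 0, 0, time.UTC)
	jd := sqlite.JulianDay(t)
	fmt.Println(jd) // ~2459032.0 (noon UTC falls on a whole Julian day)
	fmt.Println(sqlite.JulianDayToUTC(jd)) // 2020-07-01 12:00:00 +0000 UTC
}
```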
+func (t *UnixTime) Scan(src interface{}) error { + if src == nil { + t.Time = time.Time{} + return nil + } else if unixepoch, ok := src.(int64); ok { + t.Time = time.Unix(unixepoch, 0) // local time + return nil + } + return fmt.Errorf("unsupported UnixTime src: %T, %v", src, src) +} + +// Value implements the database/sql/driver/Valuer interface +func (t UnixTime) Value() (driver.Value, error) { + if t.IsZero() { + return nil, nil + } + return t.Unix(), nil +} + +// JulianTime is an alias used to persist time as float64 (max precision is 1s and timezone is lost) +type JulianTime struct { + time.Time +} + +// Scan implements the database/sql/Scanner interface. +func (t *JulianTime) Scan(src interface{}) error { + if src == nil { + t.Time = time.Time{} + return nil + } else if jd, ok := src.(int64); ok { + t.Time = JulianDayToLocalTime(float64(jd)) // local time + return nil + } else if jd, ok := src.(float64); ok { + t.Time = JulianDayToLocalTime(jd) // local time + return nil + } + return fmt.Errorf("unsupported JulianTime src: %T", src) +} + +// Value implements the database/sql/driver/Valuer interface +func (t JulianTime) Value() (driver.Value, error) { + if t.IsZero() { + return nil, nil + } + return JulianDay(t.Time), nil +} + +// TimeStamp is an alias used to persist time as '2006-01-02T15:04:05.000Z07:00' string +type TimeStamp struct { + time.Time +} + +// Scan implements the database/sql/Scanner interface. +func (t *TimeStamp) Scan(src interface{}) error { + if src == nil { + t.Time = time.Time{} + return nil + } else if txt, ok := src.(string); ok { + v, err := time.Parse("2006-01-02T15:04:05.000Z07:00", txt) + if err != nil { + return err + } + t.Time = v + return nil + } + return fmt.Errorf("unsupported TimeStamp src: %T", src) +} + +// Value implements the database/sql/driver/Valuer interface +func (t TimeStamp) Value() (driver.Value, error) { + if t.IsZero() { + return nil, nil + } + return t.Format("2006-01-02T15:04:05.000Z07:00"), nil +} + +// MarshalText encoding.TextMarshaler interface. +// TimeStamp is formatted as null when zero or RFC3339. +func (t TimeStamp) MarshalText() ([]byte, error) { + if t.IsZero() { + return []byte("null"), nil + } + return t.Time.MarshalText() +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// Date is expected in RFC3339 format or null. +func (t *TimeStamp) UnmarshalText(data []byte) error { + if bytes.Equal(data, []byte("null")) { + t.Time = time.Time{} + return nil + } + ti := &t.Time + return ti.UnmarshalText(data) +} diff --git a/vendor/github.com/gwenn/gosqlite/driver.go b/vendor/github.com/gwenn/gosqlite/driver.go new file mode 100644 index 0000000..c0076e9 --- /dev/null +++ b/vendor/github.com/gwenn/gosqlite/driver.go @@ -0,0 +1,481 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package sqlite + +import ( + "context" + "database/sql" + "database/sql/driver" + "errors" + "fmt" + "io" + "log" + "os" + "reflect" + "time" +) + +func init() { + sql.Register("sqlite3", &impl{open: defaultOpen}) + if os.Getenv("SQLITE_LOG") != "" { + ConfigLog(func(d interface{}, err error, msg string) { + log.Printf("%s: %s, %s\n", d, err, msg) + }, "SQLITE") + } + ConfigMemStatus(false) +} + +// impl is an adapter to database/sql/driver +// https://golang.org/pkg/database/sql/driver/#Driver +type impl struct { + open func(name string) (*Conn, error) + configure func(*Conn) error +} + +// https://golang.org/pkg/database/sql/driver/#Conn +type conn struct { + c *Conn +} + +// https://golang.org/pkg/database/sql/driver/#Stmt +type stmt struct { + s *Stmt + rowsRef bool // true if there is a rowsImpl associated to this statement that has not been closed. + pendingClose bool +} + +// https://golang.org/pkg/database/sql/driver/#Rows +type rowsImpl struct { + s *stmt + columnNames []string // cache + ctx context.Context +} + +// https://golang.org/pkg/database/sql/driver/#Result +type result struct { + id int64 + rows int64 +} + +// https://golang.org/pkg/database/sql/driver/#Result +func (r *result) LastInsertId() (int64, error) { + return r.id, nil +} + +// https://golang.org/pkg/database/sql/driver/#Result +func (r *result) RowsAffected() (int64, error) { + return r.rows, nil +} + +// NewDriver creates a new driver with specialized connection creation/configuration. +// NewDriver(customOpen, nil) // no post-creation hook +// NewDriver(nil, customConfigure) // default connection creation but specific configuration step +func NewDriver(open func(name string) (*Conn, error), configure func(*Conn) error) driver.Driver { + if open == nil { + open = defaultOpen + } + return &impl{open: open, configure: configure} +} + +var defaultOpen = func(name string) (*Conn, error) { + // OpenNoMutex == multi-thread mode (http://sqlite.org/compile.html#threadsafe and http://sqlite.org/threadsafe.html) + c, err := Open(name, OpenURI, OpenNoMutex, OpenReadWrite, OpenCreate) + if err != nil { + return nil, err + } + c.BusyTimeout(10 * time.Second) + //c.DefaultTimeLayout = "2006-01-02 15:04:05.999999999" + c.ScanNumericalAsTime = true + return c, nil +} + +// Open opens a new database connection. +// ":memory:" for memory db, +// "" for temp file db +// https://golang.org/pkg/database/sql/driver/#Driver +func (d *impl) Open(name string) (driver.Conn, error) { + c, err := d.open(name) + if err != nil { + return nil, err + } + if d.configure != nil { + if err = d.configure(c); err != nil { + _ = c.Close() + return nil, err + } + } + return &conn{c}, nil +} + +// Unwrap gives access to underlying driver connection. 
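A sketch of NewDriver above: register a second driver name whose connections get a post-open configuration step. The driver name "sqlite3_ext" is made up for this example; EnableExtendedResultCodes comes from the vendored config.go:

```go
package main

import (
	"database/sql"
	"log"

	sqlite "github.com/gwenn/gosqlite"
)

func init() {
	// Default connection creation (open == nil) plus a configure hook.
	sql.Register("sqlite3_ext", sqlite.NewDriver(nil, func(c *sqlite.Conn) error {
		return c.EnableExtendedResultCodes(true)
	}))
}

func main() {
	db, err := sql.Open("sqlite3_ext", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var one int
	if err = db.QueryRow("SELECT 1").Scan(&one); err != nil {
		log.Fatal(err)
	}
	log.Println(one) // 1
}
```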
+func Unwrap(db *sql.DB) *Conn {
+	_, err := db.Exec("unwrap")
+	if cerr, ok := err.(ConnError); ok {
+		return cerr.c
+	}
+	return nil
+}
+
+// https://golang.org/pkg/database/sql/driver/#Pinger
+func (c *conn) Ping(ctx context.Context) error {
+	if c.c.IsClosed() {
+		return driver.ErrBadConn
+	}
+	_, err := c.ExecContext(ctx, "PRAGMA schema_version", []driver.NamedValue{})
+	return err
+}
+
+// PRAGMA schema_version may be used to detect when the database schema is altered
+
+// https://golang.org/pkg/database/sql/driver/#Conn
+func (c *conn) Prepare(query string) (driver.Stmt, error) {
+	panic("use PrepareContext")
+}
+
+// https://golang.org/pkg/database/sql/driver/#ConnPrepareContext
+func (c *conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
+	if c.c.IsClosed() {
+		return nil, driver.ErrBadConn
+	}
+	s, err := c.c.Prepare(query)
+	if err != nil {
+		return nil, err
+	}
+	return &stmt{s: s}, nil
+}
+
+// https://golang.org/pkg/database/sql/driver/#ExecerContext
+func (c *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
+	if c.c.IsClosed() {
+		return nil, driver.ErrBadConn
+	}
+	if ctx.Done() != nil {
+		c.c.ProgressHandler(progressHandler, 100, ctx)
+		defer c.c.ProgressHandler(nil, 0, nil)
+	}
+	if len(args) == 0 {
+		if query == "unwrap" {
+			return nil, ConnError{c: c.c}
+		}
+		if err := c.c.FastExec(query); err != nil {
+			return nil, ctxError(ctx, err)
+		}
+		return c.c.result(), nil
+	}
+	for len(query) > 0 {
+		s, err := c.c.Prepare(query)
+		if err != nil {
+			return nil, ctxError(ctx, err)
+		} else if s.stmt == nil {
+			// this happens for a comment or white-space
+			query = s.tail
+			continue
+		}
+		var subargs []driver.NamedValue
+		count := s.BindParameterCount()
+		if len(s.tail) > 0 && len(args) >= count {
+			subargs = args[:count]
+			args = args[count:]
+		} else {
+			subargs = args
+		}
+		if err = s.bindNamedValue(subargs); err != nil {
+			return nil, ctxError(ctx, err)
+		}
+		err = s.exec()
+		if err != nil {
+			s.finalize()
+			return nil, ctxError(ctx, err)
+		}
+		if err = s.finalize(); err != nil {
+			return nil, ctxError(ctx, err)
+		}
+		query = s.tail
+	}
+	return c.c.result(), nil
+}
+
+// https://golang.org/pkg/database/sql/driver/#QueryerContext
+func (c *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
+	if c.c.IsClosed() {
+		return nil, driver.ErrBadConn
+	}
+	st, err := c.c.Prepare(query)
+	if err != nil {
+		return nil, err
+	}
+	s := &stmt{s: st}
+	return s.QueryContext(ctx, args)
+}
+
+// https://golang.org/pkg/database/sql/driver/#Conn
+func (c *conn) Close() error {
+	return c.c.Close()
+}
+
+// https://golang.org/pkg/database/sql/driver/#Conn
+// Deprecated
+func (c *conn) Begin() (driver.Tx, error) {
+	if c.c.IsClosed() {
+		return nil, driver.ErrBadConn
+	}
+	if err := c.c.Begin(); err != nil {
+		return nil, err
+	}
+	return c, nil
+}
+
+// https://golang.org/pkg/database/sql/driver/#ConnBeginTx
+func (c *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
+	if c.c.IsClosed() {
+		return nil, driver.ErrBadConn
+	}
+	if !c.c.GetAutocommit() {
+		return nil, errors.New("Nested transactions are not supported")
+	}
+	if err := c.c.SetQueryOnly("", opts.ReadOnly); err != nil {
+		return nil, err
+	}
+	switch sql.IsolationLevel(opts.Isolation) {
+	case sql.LevelDefault, sql.LevelSerializable:
+		if err := c.c.FastExec("PRAGMA read_uncommitted=0"); err != nil {
+			return nil, err
+		}
+	case sql.LevelReadUncommitted:
+		if err := 
c.c.FastExec("PRAGMA read_uncommitted=1"); err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("isolation level %d is not supported", opts.Isolation) + } + return c.Begin() +} + +// https://golang.org/pkg/database/sql/driver/#Tx +func (c *conn) Commit() error { + return c.c.Commit() +} + +// https://golang.org/pkg/database/sql/driver/#Tx +func (c *conn) Rollback() error { + return c.c.Rollback() +} + +// https://golang.org/pkg/database/sql/driver/#SessionResetter +func (c *conn) ResetSession(ctx context.Context) error { + // closed or pending transaction or at least one statement busy + if c.c.IsClosed() || !c.c.GetAutocommit() /*|| c.c.IsBusy()*/ { + return driver.ErrBadConn + } + return nil +} + +// https://golang.org/pkg/database/sql/driver/#Stmt +func (s *stmt) Close() error { + if s.rowsRef { // Currently, it never happens because the sql.Stmt doesn't call driver.Stmt in this case + s.pendingClose = true + return nil + } + return s.s.Finalize() +} + +// https://golang.org/pkg/database/sql/driver/#Stmt +func (s *stmt) NumInput() int { + return s.s.BindParameterCount() +} + +// https://golang.org/pkg/database/sql/driver/#Stmt +// Deprecated +func (s *stmt) Exec(args []driver.Value) (driver.Result, error) { + panic("Using ExecContext") +} + +// https://golang.org/pkg/database/sql/driver/#Stmt +// Deprecated +func (s *stmt) Query(args []driver.Value) (driver.Rows, error) { + panic("Use QueryContext") +} + +// https://golang.org/pkg/database/sql/driver/#StmtExecContext +func (s *stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { + if err := s.s.bindNamedValue(args); err != nil { + return nil, err + } + if ctx.Done() != nil { + s.s.c.ProgressHandler(progressHandler, 100, ctx) + defer s.s.c.ProgressHandler(nil, 0, nil) + } + if err := s.s.exec(); err != nil { + return nil, ctxError(ctx, err) + } + return s.s.c.result(), nil +} + +// https://golang.org/pkg/database/sql/driver/#StmtQueryContext +func (s *stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { + if s.rowsRef { + return nil, errors.New("previously returned Rows still not closed") + } + if err := s.s.bindNamedValue(args); err != nil { + return nil, err + } + s.rowsRef = true + if ctx.Done() != nil { + s.s.c.ProgressHandler(progressHandler, 100, ctx) + } + return &rowsImpl{s, nil, ctx}, nil +} + +func (s *stmt) bind(args []driver.Value) error { + for i, v := range args { + if err := s.s.BindByIndex(i+1, v); err != nil { + return err + } + } + return nil +} + +// https://golang.org/pkg/database/sql/driver/#Rows +func (r *rowsImpl) Columns() []string { + if r.columnNames == nil { + r.columnNames = r.s.s.ColumnNames() + } + return r.columnNames +} + +// https://golang.org/pkg/database/sql/driver/#Rows +func (r *rowsImpl) Next(dest []driver.Value) error { + ok, err := r.s.s.Next() + if err != nil { + return ctxError(r.ctx, err) + } + if !ok { + return io.EOF + } + for i := range dest { + dest[i], _ = r.s.s.ScanValue(i) + /*if !driver.IsScanValue(dest[i]) { + panic("Invalid type returned by ScanValue") + }*/ + } + return nil +} + +// https://golang.org/pkg/database/sql/driver/#Rows +func (r *rowsImpl) Close() error { + if r.ctx.Done() != nil { + r.s.s.c.ProgressHandler(nil, 0, nil) + } + r.s.rowsRef = false + if r.s.pendingClose { + return r.s.Close() + } + return r.s.s.Reset() +} + +// https://golang.org/pkg/database/sql/driver/#RowsNextResultSet +func (r *rowsImpl) HasNextResultSet() bool { + return len(r.s.s.tail) > 0 +} + +// 
https://golang.org/pkg/database/sql/driver/#RowsNextResultSet +func (r *rowsImpl) NextResultSet() error { + currentStmt := r.s.s + nextQuery := currentStmt.tail + var nextStmt *Stmt + var err error + for len(nextQuery) > 0 { + nextStmt, err = currentStmt.c.Prepare(nextQuery) + if err != nil { + return err + } else if nextStmt.stmt == nil { + // this happens for a comment or white-space + nextQuery = nextStmt.tail + continue + } + break + } + if nextStmt == nil { + return io.EOF + } + // TODO close vs reset ? + err = currentStmt.Finalize() + if err != nil { + return err + } + r.s.s = nextStmt + return nil +} + +// https://golang.org/pkg/database/sql/driver/#RowsColumnTypeScanType +func (r *rowsImpl) ColumnTypeScanType(index int) reflect.Type { + switch r.s.s.ColumnType(index) { + case Integer: + return reflect.TypeOf(int64(0)) + case Float: + return reflect.TypeOf(float64(0)) + case Text: + return reflect.TypeOf("") + case Null: + return reflect.TypeOf(nil) + case Blob: + fallthrough + default: + return reflect.TypeOf([]byte{}) + } +} + +// https://golang.org/pkg/database/sql/driver/#RowsColumnTypeDatabaseTypeName +func (r *rowsImpl) ColumnTypeDatabaseTypeName(index int) string { + return r.s.s.ColumnDeclaredType(index) +} + +func (c *Conn) result() driver.Result { + // TODO How to know that the last Stmt has done an INSERT? An authorizer? + id := c.LastInsertRowid() + // TODO How to know that the last Stmt has done a DELETE/INSERT/UPDATE? An authorizer? + rows := int64(c.Changes()) + return &result{id, rows} // FIXME RowAffected/noRows +} + +func (s *Stmt) bindNamedValue(args []driver.NamedValue) error { + for _, v := range args { + if len(v.Name) == 0 { + if err := s.BindByIndex(v.Ordinal, v.Value); err != nil { + return err + } + } else { + index, err := s.BindParameterIndex(":" + v.Name) // TODO "$" and "@" + if err != nil { + return err + } + if err = s.BindByIndex(index, v.Value); err != nil { + return err + } + } + } + return nil +} + +func progressHandler(p interface{}) bool { + if ctx, ok := p.(context.Context); ok { + select { + case <-ctx.Done(): + // Cancelled + return true + default: + return false + } + } + return false +} + +func ctxError(ctx context.Context, err error) error { + ctxErr := ctx.Err() + if ctxErr != nil { + return ctxErr + } + return err +} diff --git a/vendor/github.com/gwenn/gosqlite/function.c b/vendor/github.com/gwenn/gosqlite/function.c new file mode 100644 index 0000000..4f289d1 --- /dev/null +++ b/vendor/github.com/gwenn/gosqlite/function.c @@ -0,0 +1,34 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+#include <stdlib.h>
+#include <sqlite3.h>
+#include "_cgo_export.h"
+
+void goSqlite3SetAuxdata(sqlite3_context *ctx, int N, void *ad) {
+	sqlite3_set_auxdata(ctx, N, ad, goXAuxDataDestroy);
+}
+
+static inline void cXFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv) {
+	void *udf = sqlite3_user_data(ctx);
+	void *goctx = sqlite3_get_auxdata(ctx, 0);
+	goXFunc(ctx, udf, goctx, argc, argv);
+}
+
+static inline void cXStep(sqlite3_context *ctx, int argc, sqlite3_value **argv) {
+	void *udf = sqlite3_user_data(ctx);
+	goXStep(ctx, udf, argc, argv);
+}
+
+static inline void cXFinal(sqlite3_context *ctx) {
+	void *udf = sqlite3_user_data(ctx);
+	goXFinal(ctx, udf);
+}
+
+int goSqlite3CreateScalarFunction(sqlite3 *db, const char *zFunctionName, int nArg, int eTextRep, void *pApp) {
+	return sqlite3_create_function_v2(db, zFunctionName, nArg, eTextRep, pApp, cXFunc, 0, 0, goXDestroy);
+}
+int goSqlite3CreateAggregateFunction(sqlite3 *db, const char *zFunctionName, int nArg, int eTextRep, void *pApp) {
+	return sqlite3_create_function_v2(db, zFunctionName, nArg, eTextRep, pApp, 0, cXStep, cXFinal, goXDestroy);
+}
diff --git a/vendor/github.com/gwenn/gosqlite/function.go b/vendor/github.com/gwenn/gosqlite/function.go
new file mode 100644
index 0000000..dceff19
--- /dev/null
+++ b/vendor/github.com/gwenn/gosqlite/function.go
@@ -0,0 +1,524 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sqlite
+
+/*
+#include <sqlite3.h>
+#include <stdlib.h>
+// These wrappers are necessary because SQLITE_TRANSIENT
+// is a pointer constant, and cgo doesn't translate them correctly.
+
+static inline void my_result_text(sqlite3_context *ctx, char *p, int np) {
+	sqlite3_result_text(ctx, p, np, SQLITE_TRANSIENT);
+}
+static inline void my_result_blob(sqlite3_context *ctx, void *p, int np) {
+	sqlite3_result_blob(ctx, p, np, SQLITE_TRANSIENT);
+}
+
+static inline void my_result_value(sqlite3_context *ctx, sqlite3_value **argv, int i) {
+	sqlite3_result_value(ctx, argv[i]);
+}
+
+static inline const void *my_value_blob(sqlite3_value **argv, int i) {
+	return sqlite3_value_blob(argv[i]);
+}
+static inline int my_value_bytes(sqlite3_value **argv, int i) {
+	return sqlite3_value_bytes(argv[i]);
+}
+static inline double my_value_double(sqlite3_value **argv, int i) {
+	return sqlite3_value_double(argv[i]);
+}
+static inline int my_value_int(sqlite3_value **argv, int i) {
+	return sqlite3_value_int(argv[i]);
+}
+static inline sqlite3_int64 my_value_int64(sqlite3_value **argv, int i) {
+	return sqlite3_value_int64(argv[i]);
+}
+static inline const unsigned char *my_value_text(sqlite3_value **argv, int i) {
+	return sqlite3_value_text(argv[i]);
+}
+static inline int my_value_type(sqlite3_value **argv, int i) {
+	return sqlite3_value_type(argv[i]);
+}
+static inline int my_value_numeric_type(sqlite3_value **argv, int i) {
+	return sqlite3_value_numeric_type(argv[i]);
+}
+
+void goSqlite3SetAuxdata(sqlite3_context *ctx, int N, void *ad);
+int goSqlite3CreateScalarFunction(sqlite3 *db, const char *zFunctionName, int nArg, int eTextRep, void *pApp);
+int goSqlite3CreateAggregateFunction(sqlite3 *db, const char *zFunctionName, int nArg, int eTextRep, void *pApp);
+*/
+import "C"
+
+import (
+	"fmt"
+	"math"
+	"reflect"
+	"unsafe"
+)
+
+/*
+Database Connection For Functions
+http://sqlite.org/c3ref/context_db_handle.html
+
+sqlite3 *sqlite3_context_db_handle(sqlite3_context*);
+*/
+
+// Context common to function and virtual table
+// (See 
http://sqlite.org/c3ref/context.html) +type Context C.sqlite3_context + +// FunctionContext common to scalar and aggregate functions +// (See http://sqlite.org/c3ref/context.html) +type FunctionContext struct { + sc *Context + argv **C.sqlite3_value +} + +// ScalarContext is used to represent context associated to scalar function +type ScalarContext struct { + FunctionContext + ad map[int]interface{} // Function Auxiliary Data + udf *sqliteFunction +} + +// AggregateContext is used to represent context associated to aggregate function +type AggregateContext struct { + FunctionContext + Aggregate interface{} +} + +// Result sets the result of an SQL function. +func (c *FunctionContext) Result(r interface{}) { + switch r := r.(type) { + case nil: + c.ResultNull() + case string: + c.ResultText(r) + case int: + c.ResultInt(r) + case int64: + c.ResultInt64(r) + case byte: + c.ResultInt(int(r)) + case bool: + c.ResultBool(r) + case float32: + c.ResultDouble(float64(r)) + case float64: + c.ResultDouble(r) + case []byte: + c.ResultBlob(r) + case ZeroBlobLength: + c.ResultZeroblob(r) + case error: + c.ResultError(r.Error()) + case Errno: + c.ResultErrorCode(r) + default: + panic(fmt.Sprintf("unsupported type in Result: %q", reflect.TypeOf(r))) + } +} + +// ResultBool sets the result of an SQL function. +func (c *Context) ResultBool(b bool) { + if b { + c.ResultInt(1) + } else { + c.ResultInt(0) + } +} + +// ResultBool sets the result of an SQL function. +func (c *FunctionContext) ResultBool(b bool) { + c.sc.ResultBool(b) +} + +// ResultBlob sets the result of an SQL function. +// (See sqlite3_result_blob, http://sqlite.org/c3ref/result_blob.html) +func (c *Context) ResultBlob(b []byte) { + if i64 && len(b) > math.MaxInt32 { + C.sqlite3_result_error_toobig((*C.sqlite3_context)(c)) + return + } + var p *byte + if len(b) > 0 { + p = &b[0] + } + C.my_result_blob((*C.sqlite3_context)(c), unsafe.Pointer(p), C.int(len(b))) +} + +// ResultBlob sets the result of an SQL function. +func (c *FunctionContext) ResultBlob(b []byte) { + c.sc.ResultBlob(b) +} + +// ResultDouble sets the result of an SQL function. +// (See sqlite3_result_double, http://sqlite.org/c3ref/result_blob.html) +func (c *Context) ResultDouble(d float64) { + C.sqlite3_result_double((*C.sqlite3_context)(c), C.double(d)) +} + +// ResultDouble sets the result of an SQL function. +func (c *FunctionContext) ResultDouble(d float64) { + c.sc.ResultDouble(d) +} + +// ResultError sets the result of an SQL function. +// (See sqlite3_result_error, http://sqlite.org/c3ref/result_blob.html) +func (c *FunctionContext) ResultError(msg string) { + cs, l := cstring(msg) + C.sqlite3_result_error((*C.sqlite3_context)(c.sc), cs, l) +} + +// ResultErrorTooBig sets the result of an SQL function. +// (See sqlite3_result_error_toobig, http://sqlite.org/c3ref/result_blob.html) +func (c *FunctionContext) ResultErrorTooBig() { + C.sqlite3_result_error_toobig((*C.sqlite3_context)(c.sc)) +} + +// ResultErrorNoMem sets the result of an SQL function. +// (See sqlite3_result_error_nomem, http://sqlite.org/c3ref/result_blob.html) +func (c *FunctionContext) ResultErrorNoMem() { + C.sqlite3_result_error_nomem((*C.sqlite3_context)(c.sc)) +} + +// ResultErrorCode sets the result of an SQL function. +// (See sqlite3_result_error_code, http://sqlite.org/c3ref/result_blob.html) +func (c *FunctionContext) ResultErrorCode(e Errno) { + C.sqlite3_result_error_code((*C.sqlite3_context)(c.sc), C.int(e)) +} + +// ResultInt sets the result of an SQL function. 
+// (See sqlite3_result_int, http://sqlite.org/c3ref/result_blob.html) +func (c *Context) ResultInt(i int) { + if i64 && (i > math.MaxInt32 || i < math.MinInt32) { + C.sqlite3_result_int64((*C.sqlite3_context)(c), C.sqlite3_int64(i)) + } else { + C.sqlite3_result_int((*C.sqlite3_context)(c), C.int(i)) + } +} + +// ResultInt sets the result of an SQL function. +func (c *FunctionContext) ResultInt(i int) { + c.sc.ResultInt(i) +} + +// ResultInt64 sets the result of an SQL function. +// (See sqlite3_result_int64, http://sqlite.org/c3ref/result_blob.html) +func (c *Context) ResultInt64(i int64) { + C.sqlite3_result_int64((*C.sqlite3_context)(c), C.sqlite3_int64(i)) +} + +// ResultInt64 sets the result of an SQL function. +func (c *FunctionContext) ResultInt64(i int64) { + c.sc.ResultInt64(i) +} + +// ResultNull sets the result of an SQL function. +// (See sqlite3_result_null, http://sqlite.org/c3ref/result_blob.html) +func (c *Context) ResultNull() { + C.sqlite3_result_null((*C.sqlite3_context)(c)) +} + +// ResultNull sets the result of an SQL function. +func (c *FunctionContext) ResultNull() { + c.sc.ResultNull() +} + +// ResultText sets the result of an SQL function. +// (See sqlite3_result_text, http://sqlite.org/c3ref/result_blob.html) +func (c *Context) ResultText(s string) { + cs, l := cstring(s) + C.my_result_text((*C.sqlite3_context)(c), cs, l) +} + +// ResultText sets the result of an SQL function. +func (c *FunctionContext) ResultText(s string) { + c.sc.ResultText(s) +} + +// ResultValue sets the result of an SQL function. +// The leftmost value is number 0. +// (See sqlite3_result_value, http://sqlite.org/c3ref/result_blob.html) +func (c *FunctionContext) ResultValue(i int) { + C.my_result_value((*C.sqlite3_context)(c.sc), c.argv, C.int(i)) +} + +// ResultZeroblob sets the result of an SQL function. +// (See sqlite3_result_zeroblob, http://sqlite.org/c3ref/result_blob.html) +func (c *Context) ResultZeroblob(n ZeroBlobLength) { + C.sqlite3_result_zeroblob((*C.sqlite3_context)(c), C.int(n)) +} + +// ResultZeroblob sets the result of an SQL function. +func (c *FunctionContext) ResultZeroblob(n ZeroBlobLength) { + c.sc.ResultZeroblob(n) +} + +// UserData returns the user data for functions. +// (See http://sqlite.org/c3ref/user_data.html) +func (c *FunctionContext) UserData() interface{} { + udf := (*sqliteFunction)(C.sqlite3_user_data((*C.sqlite3_context)(c.sc))) + return udf.pApp +} + +// GetAuxData returns function auxiliary data. +// (See sqlite3_get_auxdata, http://sqlite.org/c3ref/get_auxdata.html) +func (c *ScalarContext) GetAuxData(n int) interface{} { + if len(c.ad) == 0 { + return nil + } + return c.ad[n] +} + +// SetAuxData sets function auxiliary data. +// No destructor is needed a priori +// (See sqlite3_set_auxdata, http://sqlite.org/c3ref/get_auxdata.html) +func (c *ScalarContext) SetAuxData(n int, ad interface{}) { + if len(c.ad) == 0 { + c.ad = make(map[int]interface{}) + } + c.ad[n] = ad +} + +// Bool obtains a SQL function parameter value. +// The leftmost value is number 0. +func (c *FunctionContext) Bool(i int) bool { + return c.Int(i) != 0 +} + +// Blob obtains a SQL function parameter value. +// The leftmost value is number 0. 
+// (See sqlite3_value_blob and sqlite3_value_bytes, http://sqlite.org/c3ref/value_blob.html) +func (c *FunctionContext) Blob(i int) []byte { + p := C.my_value_blob(c.argv, C.int(i)) + if p == nil { + return nil + } + n := C.my_value_bytes(c.argv, C.int(i)) + // value = (*[1 << 30]byte)(unsafe.Pointer(p))[:n] + return C.GoBytes(p, n) // The memory space used to hold strings and BLOBs is freed automatically. +} + +// Double obtains a SQL function parameter value. +// The leftmost value is number 0. +// (See sqlite3_value_double, http://sqlite.org/c3ref/value_blob.html) +func (c *FunctionContext) Double(i int) float64 { + return float64(C.my_value_double(c.argv, C.int(i))) +} + +// Int obtains a SQL function parameter value. +// The leftmost value is number 0. +// (See sqlite3_value_int, http://sqlite.org/c3ref/value_blob.html) +func (c *FunctionContext) Int(i int) int { + return int(C.my_value_int(c.argv, C.int(i))) +} + +// Int64 obtains a SQL function parameter value. +// The leftmost value is number 0. +// (See sqlite3_value_int64, http://sqlite.org/c3ref/value_blob.html) +func (c *FunctionContext) Int64(i int) int64 { + return int64(C.my_value_int64(c.argv, C.int(i))) +} + +// Text obtains a SQL function parameter value. +// The leftmost value is number 0. +// (See sqlite3_value_text, http://sqlite.org/c3ref/value_blob.html) +func (c *FunctionContext) Text(i int) string { + p := C.my_value_text(c.argv, C.int(i)) + if p == nil { + return "" + } + n := C.my_value_bytes(c.argv, C.int(i)) + return C.GoStringN((*C.char)(unsafe.Pointer(p)), n) +} + +// Type obtains a SQL function parameter value type. +// The leftmost value is number 0. +// (See sqlite3_value_type, http://sqlite.org/c3ref/value_blob.html) +func (c *FunctionContext) Type(i int) Type { + return Type(C.my_value_type(c.argv, C.int(i))) +} + +// NumericType obtains a SQL function parameter value numeric type (with possible conversion). +// The leftmost value is number 0. +// (See sqlite3_value_numeric_type, http://sqlite.org/c3ref/value_blob.html) +func (c *FunctionContext) NumericType(i int) Type { + return Type(C.my_value_numeric_type(c.argv, C.int(i))) +} + +// Value obtains a SQL function parameter value depending on its type. +func (c *FunctionContext) Value(i int) interface{} { + var value interface{} + switch c.Type(i) { + case Null: + value = nil + case Text: + value = c.Text(i) + case Integer: + value = c.Int64(i) + case Float: + value = c.Double(i) + case Blob: + value = c.Blob(i) + default: + panic("The value type is not one of SQLITE_INTEGER, SQLITE_FLOAT, SQLITE_TEXT, SQLITE_BLOB, or SQLITE_NULL") + } + return value +} + +// ScalarFunction is the expected signature of scalar function implemented in Go +type ScalarFunction func(ctx *ScalarContext, nArg int) + +// StepFunction is the expected signature of step function implemented in Go +type StepFunction func(ctx *AggregateContext, nArg int) + +// FinalFunction is the expected signature of final function implemented in Go +type FinalFunction func(ctx *AggregateContext) + +// DestroyDataFunction is the expected signature of function used to finalize user data. 
+type DestroyDataFunction func(pApp interface{})
+
+type sqliteFunction struct {
+	scalar     ScalarFunction
+	step       StepFunction
+	final      FinalFunction
+	d          DestroyDataFunction
+	pApp       interface{}
+	scalarCtxs map[*ScalarContext]struct{}
+	aggrCtxs   map[*AggregateContext]struct{}
+}
+
+//export goXAuxDataDestroy
+func goXAuxDataDestroy(ad unsafe.Pointer) {
+	c := (*ScalarContext)(ad)
+	if c != nil {
+		delete(c.udf.scalarCtxs, c)
+	}
+	// fmt.Printf("Contexts: %v\n", c.udf.scalarCtxs)
+}
+
+//export goXFunc
+func goXFunc(scp, udfp, ctxp unsafe.Pointer, argc int, argv unsafe.Pointer) {
+	udf := (*sqliteFunction)(udfp)
+	// To avoid the creation of a Context at each call, just put it in auxdata
+	c := (*ScalarContext)(ctxp)
+	if c == nil {
+		c = new(ScalarContext)
+		c.sc = (*Context)(scp)
+		c.udf = udf
+		C.goSqlite3SetAuxdata((*C.sqlite3_context)(c.sc), 0, unsafe.Pointer(c))
+		// To make sure it is not gced
+		udf.scalarCtxs[c] = struct{}{}
+	}
+	c.argv = (**C.sqlite3_value)(argv)
+	udf.scalar(c, argc)
+	c.argv = nil
+}
+
+//export goXStep
+func goXStep(scp, udfp unsafe.Pointer, argc int, argv unsafe.Pointer) {
+	udf := (*sqliteFunction)(udfp)
+	var cp unsafe.Pointer
+	cp = C.sqlite3_aggregate_context((*C.sqlite3_context)(scp), C.int(unsafe.Sizeof(cp)))
+	if cp != nil {
+		var c *AggregateContext
+		p := *(*unsafe.Pointer)(cp)
+		if p == nil {
+			c = new(AggregateContext)
+			c.sc = (*Context)(scp)
+			*(*unsafe.Pointer)(cp) = unsafe.Pointer(c)
+			// To make sure it is not gced
+			udf.aggrCtxs[c] = struct{}{}
+		} else {
+			c = (*AggregateContext)(p)
+		}
+
+		c.argv = (**C.sqlite3_value)(argv)
+		udf.step(c, argc)
+		c.argv = nil
+	}
+}
+
+//export goXFinal
+func goXFinal(scp, udfp unsafe.Pointer) {
+	udf := (*sqliteFunction)(udfp)
+	cp := C.sqlite3_aggregate_context((*C.sqlite3_context)(scp), 0)
+	if cp != nil {
+		p := *(*unsafe.Pointer)(cp)
+		if p != nil {
+			c := (*AggregateContext)(p)
+			delete(udf.aggrCtxs, c)
+			c.sc = (*Context)(scp)
+			udf.final(c)
+		}
+	}
+	// fmt.Printf("Contexts: %v\n", udf.aggrCtxts)
+}
+
+//export goXDestroy
+func goXDestroy(pApp unsafe.Pointer) {
+	udf := (*sqliteFunction)(pApp)
+	if udf.d != nil {
+		udf.d(udf.pApp)
+	}
+}
+
+const sqliteDeterministic = 0x800 // C.SQLITE_DETERMINISTIC
+
+// CreateScalarFunction creates or redefines SQL scalar functions.
+// Cannot be used with Go >= 1.6 and cgocheck enabled.
+// TODO Make possible to specify the preferred encoding
+// (See http://sqlite.org/c3ref/create_function.html)
+func (c *Conn) CreateScalarFunction(functionName string, nArg int32, deterministic bool, pApp interface{},
+	f ScalarFunction, d DestroyDataFunction) error {
+	var eTextRep C.int = C.SQLITE_UTF8
+	if deterministic {
+		eTextRep = eTextRep | sqliteDeterministic
+	}
+	fname := C.CString(functionName)
+	defer C.free(unsafe.Pointer(fname))
+	if f == nil {
+		if len(c.udfs) > 0 {
+			delete(c.udfs, functionName)
+		}
+		return c.error(C.sqlite3_create_function_v2(c.db, fname, C.int(nArg), eTextRep, nil, nil, nil, nil, nil),
+			fmt.Sprintf("<Conn.CreateScalarFunction(%q)", functionName))
+	}
+	// To make sure it is not gced, keep a reference in the connection.
+	udf := &sqliteFunction{scalar: f, d: d, pApp: pApp, scalarCtxs: make(map[*ScalarContext]struct{})}
+	if len(c.udfs) == 0 {
+		c.udfs = make(map[string]*sqliteFunction)
+	}
+	c.udfs[functionName] = udf
+	return c.error(C.goSqlite3CreateScalarFunction(c.db, fname, C.int(nArg), eTextRep, unsafe.Pointer(udf)),
+		fmt.Sprintf("<Conn.CreateScalarFunction(%q)", functionName))
+}
+
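
With the context helpers above, registering a scalar function comes down to one call plus a closure. A sketch, assuming an open *Conn; the function name "half" and its behavior are invented for illustration:

// exampleRegisterHalf registers a deterministic one-argument scalar function.
func exampleRegisterHalf(c *Conn) error {
	half := func(ctx *ScalarContext, nArg int) {
		// One numeric argument in, its half out.
		ctx.Result(ctx.Double(0) / 2)
	}
	// 1 argument, deterministic, no user data, no destructor.
	return c.CreateScalarFunction("half", 1, true, nil, half, nil)
}

After registration, SELECT half(6.0) yields 3.0 on that connection.
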
+// CreateAggregateFunction creates or redefines SQL aggregate functions.
+// Cannot be used with Go >= 1.6 and cgocheck enabled.
+// TODO Make possible to specify the preferred encoding
+// (See http://sqlite.org/c3ref/create_function.html)
+func (c *Conn) CreateAggregateFunction(functionName string, nArg int32, pApp interface{},
+	step StepFunction, final FinalFunction, d DestroyDataFunction) error {
+	fname := C.CString(functionName)
+	defer C.free(unsafe.Pointer(fname))
+	if step == nil {
+		if len(c.udfs) > 0 {
+			delete(c.udfs, functionName)
+		}
+		return c.error(C.sqlite3_create_function_v2(c.db, fname, C.int(nArg), C.SQLITE_UTF8, nil, nil, nil, nil, nil),
+			fmt.Sprintf("<Conn.CreateAggregateFunction(%q)", functionName))
+	}
+	// To make sure it is not gced, keep a reference in the connection.
+	udf := &sqliteFunction{step: step, final: final, d: d, pApp: pApp, aggrCtxs: make(map[*AggregateContext]struct{})}
+	if len(c.udfs) == 0 {
+		c.udfs = make(map[string]*sqliteFunction)
+	}
+	c.udfs[functionName] = udf
+	return c.error(C.goSqlite3CreateAggregateFunction(c.db, fname, C.int(nArg), C.SQLITE_UTF8, unsafe.Pointer(udf)),
+		fmt.Sprintf("<Conn.CreateAggregateFunction(%q)", functionName))
+}
diff --git a/vendor/github.com/gwenn/gosqlite/hook.c b/vendor/github.com/gwenn/gosqlite/hook.c
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/gwenn/gosqlite/hook.c
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include <sqlite3.h>
+// warning: incompatible pointer types passing
+//#include "_cgo_export.h"
+
+extern int goXCommitHook(void *udp);
+
+void* goSqlite3CommitHook(sqlite3 *db, void *udp) {
+	return sqlite3_commit_hook(db, goXCommitHook, udp);
+}
+
+extern void goXRollbackHook(void *udp);
+
+void* goSqlite3RollbackHook(sqlite3 *db, void *udp) {
+	return sqlite3_rollback_hook(db, goXRollbackHook, udp);
+}
+
+extern void goXUpdateHook(void *udp, int action, char const *dbName, char const *tableName, sqlite3_int64 rowID);
+
+void* goSqlite3UpdateHook(sqlite3 *db, void *udp) {
+	return sqlite3_update_hook(db, goXUpdateHook, udp);
+}
+
+/*
+extern int goXWalHook(void *udp, sqlite3* db, const char *dbName, int nEntry);
+
+void* goSqlite3WalHook(sqlite3 *db, void *udp) {
+	return sqlite3_wal_hook(db, goXWalHook, udp);
+}
+*/
\ No newline at end of file
diff --git a/vendor/github.com/gwenn/gosqlite/hook.go b/vendor/github.com/gwenn/gosqlite/hook.go
new file mode 100644
index 0000000..67a8388
--- /dev/null
+++ b/vendor/github.com/gwenn/gosqlite/hook.go
@@ -0,0 +1,133 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sqlite
+
+/*
+#include <sqlite3.h>
+
+void* goSqlite3CommitHook(sqlite3 *db, void *udp);
+void* goSqlite3RollbackHook(sqlite3 *db, void *udp);
+void* goSqlite3UpdateHook(sqlite3 *db, void *udp);
+//void* goSqlite3WalHook(sqlite3 *db, void *udp);
+*/
+import "C"
+
+import (
+	"unsafe"
+)
+
+// CommitHook is the callback function signature.
+// If the callback on a commit hook function returns true, then the commit is converted into a rollback.
+type CommitHook func(udp interface{}) (rollback bool)
+
+type sqliteCommitHook struct {
+	f   CommitHook
+	udp interface{}
+}
+
+//export goXCommitHook
+func goXCommitHook(udp unsafe.Pointer) C.int {
+	arg := (*sqliteCommitHook)(udp)
+	return btocint(arg.f(arg.udp))
+}
+
+// CommitHook registers a callback function to be invoked whenever a transaction is committed.
+// Cannot be used with Go >= 1.6 and cgocheck enabled.
+// (See http://sqlite.org/c3ref/commit_hook.html)
+func (c *Conn) CommitHook(f CommitHook, udp interface{}) {
+	if f == nil {
+		c.commitHook = nil
+		C.sqlite3_commit_hook(c.db, nil, nil)
+		return
+	}
+	// To make sure it is not gced, keep a reference in the connection.
+	c.commitHook = &sqliteCommitHook{f, udp}
+	C.goSqlite3CommitHook(c.db, unsafe.Pointer(c.commitHook))
+}
+
+// RollbackHook is the callback function signature.
+type RollbackHook func(udp interface{})
+
+type sqliteRollbackHook struct {
+	f   RollbackHook
+	udp interface{}
+}
+
+//export goXRollbackHook
+func goXRollbackHook(udp unsafe.Pointer) {
+	arg := (*sqliteRollbackHook)(udp)
+	arg.f(arg.udp)
+}
+
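
All of the hooks here follow the same keep-a-reference pattern. A sketch of a commit hook that vetoes commits while a flag is set, assuming an open *Conn; the flag is invented for illustration (returning true converts the commit into a rollback, as documented above):

// exampleVetoCommits rolls back every commit attempted while *readOnlyMode is true.
func exampleVetoCommits(c *Conn, readOnlyMode *bool) {
	c.CommitHook(func(udp interface{}) bool {
		return *readOnlyMode // true means: turn this commit into a rollback
	}, nil)
}
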
+// RollbackHook registers a callback to be invoked each time a transaction is rolled back.
+// Cannot be used with Go >= 1.6 and cgocheck enabled.
+// (See http://sqlite.org/c3ref/commit_hook.html)
+func (c *Conn) RollbackHook(f RollbackHook, udp interface{}) {
+	if f == nil {
+		c.rollbackHook = nil
+		C.sqlite3_rollback_hook(c.db, nil, nil)
+		return
+	}
+	// To make sure it is not gced, keep a reference in the connection.
+	c.rollbackHook = &sqliteRollbackHook{f, udp}
+	C.goSqlite3RollbackHook(c.db, unsafe.Pointer(c.rollbackHook))
+}
+
+// UpdateHook is the callback function signature.
+type UpdateHook func(udp interface{}, a Action, dbName, tableName string, rowID int64)
+
+type sqliteUpdateHook struct {
+	f   UpdateHook
+	udp interface{}
+}
+
+//export goXUpdateHook
+func goXUpdateHook(udp unsafe.Pointer, action int, dbName, tableName *C.char, rowID C.sqlite3_int64) {
+	arg := (*sqliteUpdateHook)(udp)
+	arg.f(arg.udp, Action(action), C.GoString(dbName), C.GoString(tableName), int64(rowID))
+}
+
+// UpdateHook registers a callback to be invoked each time a row is updated,
+// inserted or deleted using this database connection.
+// Cannot be used with Go >= 1.6 and cgocheck enabled.
+// (See http://sqlite.org/c3ref/update_hook.html)
+func (c *Conn) UpdateHook(f UpdateHook, udp interface{}) {
+	if f == nil {
+		c.updateHook = nil
+		C.sqlite3_update_hook(c.db, nil, nil)
+		return
+	}
+	// To make sure it is not gced, keep a reference in the connection.
+	c.updateHook = &sqliteUpdateHook{f, udp}
+	C.goSqlite3UpdateHook(c.db, unsafe.Pointer(c.updateHook))
+}
+
+/*
+type WalHook func(udp interface{}, c *Conn, dbName string, nEntry int) int
+
+type sqliteWalHook struct {
+	f   WalHook
+	udp interface{}
+}
+
+//export goXWalHook
+func goXWalHook(udp, db unsafe.Pointer, dbName *C.char, nEntry C.int) C.int {
+	return 0
+}
+
+// Register a callback to be invoked each time a transaction is written
+// into the write-ahead-log by this database connection.
+// (See http://sqlite.org/c3ref/wal_hook.html)
+func (c *Conn) WalHook(f WalHook, udp interface{}) {
+	if f == nil {
+		c.walHook = nil
+		C.sqlite3_wal_hook(c.db, nil, nil)
+		return
+	}
+	// To make sure it is not gced, keep a reference in the connection.
+	c.walHook = &sqliteWalHook{f, udp}
+	C.goSqlite3WalHook(c.db, unsafe.Pointer(c.walHook))
+}
+*/
diff --git a/vendor/github.com/gwenn/gosqlite/intarray.c b/vendor/github.com/gwenn/gosqlite/intarray.c
new file mode 100644
index 0000000..6a7ba69
--- /dev/null
+++ b/vendor/github.com/gwenn/gosqlite/intarray.c
@@ -0,0 +1,262 @@
+/*
+** 2009 November 10
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+**
+** This file implements a read-only VIRTUAL TABLE that contains the
+** content of a C-language array of integer values. See the corresponding
+** header file for full details.
+*/
+#include <sqlite3.h>
+#include <string.h>
+#include <assert.h>
+
+/*
+** An sqlite3_intarray is an abstract type to store an instance of
+** an integer array.
+*/
+typedef struct sqlite3_intarray sqlite3_intarray;
+
+/*
+** Definition of the sqlite3_intarray object.
+**
+** The internal representation of an intarray object is subject
+** to change, is not externally visible, and should be used by
+** the implementation of intarray only. This object is opaque
+** to users.
+*/
+struct sqlite3_intarray {
+  int n;                 /* Number of elements in the array */
+  sqlite3_int64 *a;      /* Contents of the array */
+  void (*xFree)(void*);  /* Function used to free a[] */
+};
+
+/* Objects used internally by the virtual table implementation */
+typedef struct intarray_vtab intarray_vtab;
+typedef struct intarray_cursor intarray_cursor;
+
+/* An intarray table object */
+struct intarray_vtab {
+  sqlite3_vtab base;           /* Base class */
+  sqlite3_intarray *pContent;  /* Content of the integer array */
+};
+
+/* An intarray cursor object */
+struct intarray_cursor {
+  sqlite3_vtab_cursor base;  /* Base class */
+  int i;                     /* Current cursor position */
+};
+
+/*
+** Free an sqlite3_intarray object.
+*/
+static void intarrayFree(sqlite3_intarray *p){
+  if( p->xFree ){
+    p->xFree(p->a);
+  }
+  sqlite3_free(p);
+}
+
+/*
+** Table destructor for the intarray module.
+*/
+static int intarrayDestroy(sqlite3_vtab *p){
+  intarray_vtab *pVtab = (intarray_vtab*)p;
+  sqlite3_free(pVtab);
+  return 0;
+}
+
+/*
+** Table constructor for the intarray module.
+*/
+static int intarrayCreate(
+  sqlite3 *db,             /* Database where module is created */
+  void *pAux,              /* clientdata for the module */
+  int argc,                /* Number of arguments */
+  const char *const*argv,  /* Value for all arguments */
+  sqlite3_vtab **ppVtab,   /* Write the new virtual table object here */
+  char **pzErr             /* Put error message text here */
+){
+  int rc = SQLITE_NOMEM;
+  intarray_vtab *pVtab = sqlite3_malloc(sizeof(intarray_vtab));
+
+  if( pVtab ){
+    memset(pVtab, 0, sizeof(intarray_vtab));
+    pVtab->pContent = (sqlite3_intarray*)pAux;
+    rc = sqlite3_declare_vtab(db, "CREATE TABLE x(value INTEGER PRIMARY KEY)");
+  }
+  *ppVtab = (sqlite3_vtab *)pVtab;
+  return rc;
+}
+
+/*
+** Open a new cursor on the intarray table.
+*/
+static int intarrayOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){
+  int rc = SQLITE_NOMEM;
+  intarray_cursor *pCur;
+  pCur = sqlite3_malloc(sizeof(intarray_cursor));
+  if( pCur ){
+    memset(pCur, 0, sizeof(intarray_cursor));
+    *ppCursor = (sqlite3_vtab_cursor *)pCur;
+    rc = SQLITE_OK;
+  }
+  return rc;
+}
+
+/*
+** Close an intarray table cursor.
+*/
+static int intarrayClose(sqlite3_vtab_cursor *cur){
+  intarray_cursor *pCur = (intarray_cursor *)cur;
+  sqlite3_free(pCur);
+  return SQLITE_OK;
+}
+
+/*
+** Retrieve a column of data.
+*/
+static int intarrayColumn(sqlite3_vtab_cursor *cur, sqlite3_context *ctx, int i){
+  intarray_cursor *pCur = (intarray_cursor*)cur;
+  intarray_vtab *pVtab = (intarray_vtab*)cur->pVtab;
+  if( pCur->i>=0 && pCur->i<pVtab->pContent->n ){
+    sqlite3_result_int64(ctx, pVtab->pContent->a[pCur->i]);
+  }
+  return SQLITE_OK;
+}
+
+/*
+** Retrieve the current rowid.
+*/
+static int intarrayRowid(sqlite3_vtab_cursor *cur, sqlite_int64 *pRowid){
+  intarray_cursor *pCur = (intarray_cursor *)cur;
+  *pRowid = pCur->i;
+  return SQLITE_OK;
+}
+
+static int intarrayEof(sqlite3_vtab_cursor *cur){
+  intarray_cursor *pCur = (intarray_cursor *)cur;
+  intarray_vtab *pVtab = (intarray_vtab *)cur->pVtab;
+  return pCur->i>=pVtab->pContent->n;
+}
+
+/*
+** Advance the cursor to the next row.
+*/
+static int intarrayNext(sqlite3_vtab_cursor *cur){
+  intarray_cursor *pCur = (intarray_cursor *)cur;
+  pCur->i++;
+  return SQLITE_OK;
+}
+
+/*
+** Reset an intarray table cursor.
+*/
+static int intarrayFilter(
+  sqlite3_vtab_cursor *pVtabCursor,
+  int idxNum, const char *idxStr,
+  int argc, sqlite3_value **argv
+){
+  intarray_cursor *pCur = (intarray_cursor *)pVtabCursor;
+  pCur->i = 0;
+  return SQLITE_OK;
+}
+
+/*
+** Analyse the WHERE condition.
+*/
+static int intarrayBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){
+  return SQLITE_OK;
+}
+
+/*
+** The virtual table module implementing read-only access to the
+** bound integer array.
+*/
+static sqlite3_module intarrayModule = {
+  0,                  /* iVersion */
+  intarrayCreate,     /* xCreate - create a new virtual table */
+  intarrayCreate,     /* xConnect - connect to an existing vtab */
+  intarrayBestIndex,  /* xBestIndex - find the best query index */
+  intarrayDestroy,    /* xDisconnect - disconnect a vtab */
+  intarrayDestroy,    /* xDestroy - destroy a vtab */
+  intarrayOpen,       /* xOpen - open a cursor */
+  intarrayClose,      /* xClose - close a cursor */
+  intarrayFilter,     /* xFilter - configure scan constraints */
+  intarrayNext,       /* xNext - advance a cursor */
+  intarrayEof,        /* xEof */
+  intarrayColumn,     /* xColumn - read data */
+  intarrayRowid,      /* xRowid - read data */
+  0,                  /* xUpdate */
+  0,                  /* xBegin */
+  0,                  /* xSync */
+  0,                  /* xCommit */
+  0,                  /* xRollback */
+  0,                  /* xFindMethod */
+  0,                  /* xRename */
+};
+
+/*
+** Invoke this routine to create a specific instance of an intarray object.
+** The new intarray object is returned by the 3rd parameter.
+**
+** Each intarray object corresponds to a virtual table in the TEMP table
+** with a name of zName.
+**
+** Destroy the intarray object by dropping the virtual table. If not done
+** explicitly by the application, the virtual table will be dropped implicitly
+** by the system when the database connection is closed.
+*/
+int sqlite3_intarray_create(
+  sqlite3 *db,
+  const char *zName,
+  sqlite3_intarray **ppReturn
+){
+  int rc = SQLITE_OK;
+  sqlite3_intarray *p;
+
+  *ppReturn = p = sqlite3_malloc( sizeof(*p) );
+  if( p==0 ){
+    return SQLITE_NOMEM;
+  }
+  memset(p, 0, sizeof(*p));
+  rc = sqlite3_create_module_v2(db, zName, &intarrayModule, p,
+                                (void(*)(void*))intarrayFree);
+  if( rc==SQLITE_OK ){
+    char *zSql;
+    zSql = sqlite3_mprintf("CREATE VIRTUAL TABLE temp.%Q USING %Q",
+                           zName, zName);
+    rc = sqlite3_exec(db, zSql, 0, 0, 0);
+    sqlite3_free(zSql);
+  }
+  return rc;
+}
+
+/*
+** Bind a new array of integers to a specific intarray object.
+**
+** The array of integers bound must be unchanged for the duration of
+** any query against the corresponding virtual table. If the integer
+** array does change or is deallocated undefined behavior will result.
+*/
+int sqlite3_intarray_bind(
+  sqlite3_intarray *pIntArray,  /* The intarray object to bind to */
+  int nElements,                /* Number of elements in the intarray */
+  sqlite3_int64 *aElements,     /* Content of the intarray */
+  void (*xFree)(void*)          /* How to dispose of the intarray when done */
+){
+  if( pIntArray->xFree ){
+    pIntArray->xFree(pIntArray->a);
+  }
+  pIntArray->n = nElements;
+  pIntArray->a = aElements;
+  pIntArray->xFree = xFree;
+  return SQLITE_OK;
+}
diff --git a/vendor/github.com/gwenn/gosqlite/intarray.go b/vendor/github.com/gwenn/gosqlite/intarray.go
new file mode 100644
index 0000000..9cb6531
--- /dev/null
+++ b/vendor/github.com/gwenn/gosqlite/intarray.go
@@ -0,0 +1,150 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
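
On the Go side (intarray.go, below), the C module above reduces to three calls: create, bind, drop. A sketch, assuming an open *Conn; the array name and the query in the comment are invented for illustration:

// exampleIntArray binds a slice to a TEMP virtual table usable in IN clauses.
func exampleIntArray(c *Conn) error {
	ids, err := c.CreateIntArray("ids") // creates the TEMP virtual table "ids"
	if err != nil {
		return err
	}
	defer ids.Drop() // drops the virtual table; error ignored in this sketch
	ids.Bind([]int64{1, 2, 3})
	// The bound slice can now back an IN operator:
	//   SELECT * FROM user WHERE id IN ids;
	return nil
}
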
+
+package sqlite
+
+/*
+#include <sqlite3.h>
+#include <stdlib.h>
+
+// An sqlite3_intarray is an abstract type to store an instance of an integer array.
+typedef struct sqlite3_intarray sqlite3_intarray;
+int sqlite3_intarray_bind(sqlite3_intarray *pIntArray, int nElements, sqlite3_int64 *aElements, void (*xFree)(void*));
+int sqlite3_intarray_create(sqlite3 *db, const char *zName, sqlite3_intarray **ppReturn);
+*/
+import "C"
+
+import (
+	"errors"
+	"fmt"
+	"unsafe"
+)
+
+// IntArray is the Go-language interface definition for the "intarray" or
+// integer array virtual table for SQLite.
+//
+// The intarray virtual table is designed to facilitate using an
+// array of integers as the right-hand side of an IN operator. So
+// instead of doing a prepared statement like this:
+//
+//	SELECT * FROM table WHERE x IN (?,?,?,...,?);
+//
+// And then binding individual integers to each of ? slots, a Go-language
+// application can create an intarray object (named "ex1" in the following
+// example), prepare a statement like this:
+//
+//	SELECT * FROM table WHERE x IN ex1;
+//
+// Then bind an ordinary Go slice of integer values to the ex1 object
+// to run the statement.
+//
+// USAGE:
+//
+// One or more intarray objects can be created as follows:
+//
+//	var p1, p2, p3 IntArray
+//	p1, err = db.CreateIntArray("ex1")
+//	p2, err = db.CreateIntArray("ex2")
+//	p3, err = db.CreateIntArray("ex3")
+//
+// Each call to CreateIntArray() generates a new virtual table
+// module and a singleton of that virtual table module in the TEMP
+// database. Both the module and the virtual table instance use the
+// name given by the second parameter. The virtual tables can then be
+// used in prepared statements:
+//
+//	SELECT * FROM t1, t2, t3
+//	WHERE t1.x IN ex1
+//	AND t2.y IN ex2
+//	AND t3.z IN ex3;
+//
+// Each integer array is initially empty. New arrays can be bound to
+// an integer array as follows:
+//
+//	p1.Bind([]int64{ 1, 2, 3, 4 })
+//	p2.Bind([]int64{ 5, 6, 7, 8, 9, 10, 11 })
+//	a3 := make([]int64, 100)
+//	// Fill in content of a3
+//	p3.Bind(a3)
+//
+// A single intarray object can be rebound multiple times. But do not
+// attempt to change the bindings of an intarray while it is in the middle
+// of a query.
+//
+// The application must not change the intarray values while an intarray is in
+// the middle of a query.
+//
+// The intarray object is automatically destroyed when its corresponding
+// virtual table is dropped. Since the virtual tables are created in the
+// TEMP database, they are automatically dropped when the database connection
+// closes so the application does not normally need to take any special
+// action to free the intarray objects (except if connections are pooled...).
type IntArray interface {
+	Bind(elements []int64)
+	Drop() error
+}
+
+type intArray struct {
+	c       *Conn
+	ia      *C.sqlite3_intarray
+	name    string
+	content []int64
+}
+
+// CreateIntArray creates a specific instance of an intarray object.
+//
+// Each intarray object corresponds to a virtual table in the TEMP database
+// with the specified name.
+//
+// Destroy the intarray object by dropping the virtual table. If not done
+// explicitly by the application, the virtual table will be dropped implicitly
+// by the system when the database connection is closed.
+func (c *Conn) CreateIntArray(name string) (IntArray, error) {
+	var ia *C.sqlite3_intarray
+	cname := C.CString(name)
+	rv := C.sqlite3_intarray_create(c.db, cname, &ia)
+	C.free(unsafe.Pointer(cname))
+	if rv != C.SQLITE_OK {
+		return nil, Errno(rv)
+	}
+	if ia == nil {
+		return nil, errors.New("sqlite succeeded without returning an intarray")
+	}
+	module := &intArray{c: c, ia: ia, name: name}
+	return module, nil
+}
+
+// Bind a new array of integers to a specific intarray object.
+//
+// The array of integers bound must be unchanged for the duration of
+// any query against the corresponding virtual table. If the integer
+// array does change or is deallocated undefined behavior will result.
+func (m *intArray) Bind(elements []int64) {
+	if m.ia == nil {
+		return
+	}
+	m.content = elements
+	var p *int64
+	if len(elements) > 0 {
+		p = &elements[0]
+	}
+	C.sqlite3_intarray_bind(m.ia, C.int(len(elements)), (*C.sqlite3_int64)(unsafe.Pointer(p)), nil)
+}
+
+// Drop underlying virtual table.
+func (m *intArray) Drop() error {
+	if m == nil {
+		return errors.New("nil sqlite intarray")
+	}
+	if m.c == nil {
+		return nil
+	}
+	err := m.c.FastExec(fmt.Sprintf(`DROP TABLE temp."%s"`, escapeQuote(m.name)))
+	if err != nil {
+		return err
+	}
+	m.c = nil
+	m.ia = nil
+	return nil
+}
diff --git a/vendor/github.com/gwenn/gosqlite/limit.go b/vendor/github.com/gwenn/gosqlite/limit.go
new file mode 100644
index 0000000..f459325
--- /dev/null
+++ b/vendor/github.com/gwenn/gosqlite/limit.go
@@ -0,0 +1,40 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sqlite
+
+/*
+#include <sqlite3.h>
+*/
+import "C"
+
+// Limit enumerates run-time limit categories
+// (See http://www.sqlite.org/c3ref/c_limit_attached.html)
+type Limit int32
+
+// Run-time limit categories
+const (
+	LimitLength            Limit = C.SQLITE_LIMIT_LENGTH // The maximum size of any string or BLOB or table row, in bytes.
+	LimitColumn            Limit = C.SQLITE_LIMIT_COLUMN
+	LimitExprDepth         Limit = C.SQLITE_LIMIT_EXPR_DEPTH
+	LimitCompoundSelect    Limit = C.SQLITE_LIMIT_COMPOUND_SELECT
+	LimitVdbeOp            Limit = C.SQLITE_LIMIT_VDBE_OP
+	LimitFunctionArg       Limit = C.SQLITE_LIMIT_FUNCTION_ARG
+	LimitAttached          Limit = C.SQLITE_LIMIT_ATTACHED
+	LimitLikePatternLength Limit = C.SQLITE_LIMIT_LIKE_PATTERN_LENGTH
+	LimitVariableNumber    Limit = C.SQLITE_LIMIT_VARIABLE_NUMBER
+	LimitTriggerDepth      Limit = C.SQLITE_LIMIT_TRIGGER_DEPTH
+)
+
+// Limit queries the current value of a limit.
+// (See http://www.sqlite.org/c3ref/limit.html)
+func (c *Conn) Limit(id Limit) int32 {
+	return int32(C.sqlite3_limit(c.db, C.int(id), -1))
+}
+
+// SetLimit changes the value of a limit.
+// (See http://www.sqlite.org/c3ref/limit.html)
+func (c *Conn) SetLimit(id Limit, newVal int32) int32 {
+	return int32(C.sqlite3_limit(c.db, C.int(id), C.int(newVal)))
+}
diff --git a/vendor/github.com/gwenn/gosqlite/meta.go b/vendor/github.com/gwenn/gosqlite/meta.go
new file mode 100644
index 0000000..02dbd6d
--- /dev/null
+++ b/vendor/github.com/gwenn/gosqlite/meta.go
@@ -0,0 +1,353 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sqlite
+
+/*
+#include <sqlite3.h>
+#include <stdlib.h>
+*/
+import "C"
+
+import (
+	"fmt"
+	"strings"
+)
+
+// Databases returns one (name, file) pair for each database attached to the current database connection.
+// (See http://www.sqlite.org/pragma.html#pragma_database_list)
+func (c *Conn) Databases() (map[string]string, error) {
+	s, err := c.prepare("PRAGMA database_list")
+	if err != nil {
+		return nil, err
+	}
+	defer s.finalize()
+	var databases = make(map[string]string)
+	var name, file string
+	err = s.execQuery(func(s *Stmt) (err error) {
+		if err = s.Scan(nil, &name, &file); err != nil {
+			return
+		}
+		databases[name] = file
+		return
+	})
+	if err != nil {
+		return nil, err
+	}
+	return databases, nil
+}
+
+// Tables returns tables (not views) from 'sqlite_master'/'sqlite_temp_master' and filters system tables out.
+// The database name can be empty, "main", "temp" or the name of an attached database.
+func (c *Conn) Tables(dbName string) ([]string, error) {
+	var sql string
+	if len(dbName) == 0 {
+		sql = "SELECT name FROM sqlite_master WHERE type = 'table' AND name NOT LIKE 'sqlite_%' ORDER BY 1"
+	} else if strings.EqualFold("temp", dbName) {
+		sql = "SELECT name FROM sqlite_temp_master WHERE type = 'table' AND name NOT LIKE 'sqlite_%' ORDER BY 1"
+	} else {
+		sql = fmt.Sprintf("SELECT name FROM %s.sqlite_master WHERE type = 'table' AND name NOT LIKE 'sqlite_%%' ORDER BY 1", doubleQuote(dbName))
+	}
+	s, err := c.prepare(sql)
+	if err != nil {
+		return nil, err
+	}
+	defer s.finalize()
+	var tables = make([]string, 0, 20)
+	err = s.Select(func(s *Stmt) error {
+		name, _ := s.ScanText(0)
+		tables = append(tables, name)
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	return tables, nil
+}
+
+// Views returns views from 'sqlite_master'/'sqlite_temp_master'.
+// The database name can be empty, "main", "temp" or the name of an attached database.
+func (c *Conn) Views(dbName string) ([]string, error) {
+	var sql string
+	if len(dbName) == 0 {
+		sql = "SELECT name FROM sqlite_master WHERE type = 'view' ORDER BY 1"
+	} else if strings.EqualFold("temp", dbName) {
+		sql = "SELECT name FROM sqlite_temp_master WHERE type = 'view' ORDER BY 1"
+	} else {
+		sql = fmt.Sprintf("SELECT name FROM %s.sqlite_master WHERE type = 'view' ORDER BY 1", doubleQuote(dbName))
+	}
+	s, err := c.prepare(sql)
+	if err != nil {
+		return nil, err
+	}
+	defer s.finalize()
+	var views = make([]string, 0, 20)
+	err = s.Select(func(s *Stmt) error {
+		name, _ := s.ScanText(0)
+		views = append(views, name)
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+	return views, nil
+}
+
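
Tables above plus Columns (defined just below) are enough to walk a whole schema. A sketch, assuming an open *Conn; the output format is invented for illustration:

// exampleDumpSchema prints every column of every table in the main database.
func exampleDumpSchema(c *Conn) error {
	tables, err := c.Tables("") // "" means the main database
	if err != nil {
		return err
	}
	for _, table := range tables {
		cols, err := c.Columns("", table)
		if err != nil {
			return err
		}
		for _, col := range cols {
			fmt.Printf("%s.%s %s\n", table, col.Name, col.DataType)
		}
	}
	return nil
}
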
+// Indexes returns indexes from 'sqlite_master'/'sqlite_temp_master'.
+// As the index name is unique per database, (index name, table name) pairs are returned.
+// The database name can be empty, "main", "temp" or the name of an attached database.
+func (c *Conn) Indexes(dbName string) (map[string]string, error) {
+	var sql string
+	if len(dbName) == 0 {
+		sql = "SELECT name, tbl_name FROM sqlite_master WHERE type = 'index'"
+	} else if strings.EqualFold("temp", dbName) {
+		sql = "SELECT name, tbl_name FROM sqlite_temp_master WHERE type = 'index'"
+	} else {
+		sql = fmt.Sprintf("SELECT name, tbl_name FROM %s.sqlite_master WHERE type = 'index'", doubleQuote(dbName))
+	}
+	s, err := c.prepare(sql)
+	if err != nil {
+		return nil, err
+	}
+	defer s.finalize()
+	var indexes = make(map[string]string)
+	var name, table string
+	err = s.Select(func(s *Stmt) (err error) {
+		s.Scan(&name, &table)
+		indexes[name] = table
+		return
+	})
+	if err != nil {
+		return nil, err
+	}
+	return indexes, nil
+}
+
+// Column is the description of one table's column
+// See Conn.Columns/IndexColumns
+type Column struct {
+	Cid       int
+	Name      string
+	DataType  string
+	NotNull   bool
+	DfltValue string // FIXME type ?
+	Pk        int
+	Autoinc   bool
+	CollSeq   string
+}
+
+// Columns returns a description for each column in the named table/view.
+// Column.Autoinc and Column.CollSeq are left unspecified.
+// No error is returned if the table does not exist.
+// (See http://www.sqlite.org/pragma.html#pragma_table_info)
+func (c *Conn) Columns(dbName, table string) ([]Column, error) {
+	var pragma string
+	if len(dbName) == 0 {
+		pragma = fmt.Sprintf(`PRAGMA table_info("%s")`, escapeQuote(table))
+	} else {
+		pragma = fmt.Sprintf(`PRAGMA %s.table_info("%s")`, doubleQuote(dbName), escapeQuote(table))
+	}
+	s, err := c.prepare(pragma)
+	if err != nil {
+		return nil, err
+	}
+	defer s.finalize()
+	var columns = make([]Column, 0, 20)
+	err = s.execQuery(func(s *Stmt) (err error) {
+		c := Column{}
+		if err = s.Scan(&c.Cid, &c.Name, &c.DataType, &c.NotNull, &c.DfltValue, &c.Pk); err != nil {
+			return
+		}
+		columns = append(columns, c)
+		return
+	})
+	if err != nil {
+		return nil, err
+	}
+	return columns, nil
+}
+
+// ColumnDeclaredType returns the declared type of the table column of a particular result column in SELECT statement.
+// If the result column is an expression or subquery, then an empty string is returned.
+// The left-most column is column 0.
+// (See http://www.sqlite.org/c3ref/column_decltype.html)
+func (s *Stmt) ColumnDeclaredType(index int) string {
+	if index < 0 || index >= s.ColumnCount() {
+		panic(fmt.Sprintf("column index %d out of range [0,%d[.", index, s.ColumnCount()))
+	}
+	return C.GoString(C.sqlite3_column_decltype(s.stmt, C.int(index)))
+}
+
+// Affinity enumerates SQLite column type affinity
+type Affinity string
+
+// SQLite column type affinities
+const (
+	Integral  = Affinity("INTEGER") // Integer affinity
+	Real      = Affinity("REAL")
+	Numerical = Affinity("NUMERIC")
+	None      = Affinity("NONE")
+	Textual   = Affinity("TEXT")
+)
+
+// ColumnTypeAffinity returns the type affinity of the table column of a particular result column in SELECT statement.
+// If the result column is an expression or subquery, then None is returned.
+// The left-most column is column 0.
+// (See http://sqlite.org/datatype3.html) +func (s *Stmt) ColumnTypeAffinity(index int) Affinity { + if index < 0 || index >= s.ColumnCount() { + panic(fmt.Sprintf("column index %d out of range [0,%d[.", index, s.ColumnCount())) + } + if s.affinities == nil { + count := s.ColumnCount() + s.affinities = make([]Affinity, count) + } else { + if affinity := s.affinities[index]; affinity != "" { + return affinity + } + } + declType := s.ColumnDeclaredType(index) + affinity := typeAffinity(declType) + s.affinities[index] = affinity + return affinity +} + +// Affinity returns the type affinity of the column. +func (c Column) Affinity() Affinity { + return typeAffinity(c.DataType) +} + +func typeAffinity(declType string) Affinity { + if declType == "" { + return None + } + declType = strings.ToUpper(declType) + if strings.Contains(declType, "INT") { + return Integral + } else if strings.Contains(declType, "TEXT") || strings.Contains(declType, "CHAR") || strings.Contains(declType, "CLOB") { + return Textual + } else if strings.Contains(declType, "BLOB") { + return None + } else if strings.Contains(declType, "REAL") || strings.Contains(declType, "FLOA") || strings.Contains(declType, "DOUB") { + return Real + } + return Numerical +} + +// ForeignKey is the description of one table's foreign key +// See Conn.ForeignKeys +type ForeignKey struct { + Table string + From []string + To []string +} + +// ForeignKeys returns one description for each foreign key that references a column in the argument table. +// No error is returned if the table does not exist. +// (See http://www.sqlite.org/pragma.html#pragma_foreign_key_list) +func (c *Conn) ForeignKeys(dbName, table string) (map[int]*ForeignKey, error) { + var pragma string + if len(dbName) == 0 { + pragma = fmt.Sprintf(`PRAGMA foreign_key_list("%s")`, escapeQuote(table)) + } else { + pragma = fmt.Sprintf(`PRAGMA %s.foreign_key_list("%s")`, doubleQuote(dbName), escapeQuote(table)) + } + s, err := c.prepare(pragma) + if err != nil { + return nil, err + } + defer s.finalize() + var fks = make(map[int]*ForeignKey) + var id, seq int + var ref, from, to string + err = s.execQuery(func(s *Stmt) (err error) { + if err = s.NamedScan("id", &id, "seq", &seq, "table", &ref, "from", &from, "to", &to); err != nil { + return + } + fk, ex := fks[id] + if !ex { + fk = &ForeignKey{Table: ref} + fks[id] = fk + } + // TODO Ensure columns are appended in the correct order... + fk.From = append(fk.From, from) + fk.To = append(fk.To, to) + return + }) + if err != nil { + return nil, err + } + return fks, nil +} + +// Index is the description of one table's index +// See Conn.Indexes +type Index struct { + Name string + Unique bool +} + +// TableIndexes returns one description for each index associated with the given table. +// No error is returned if the table does not exist. 
+// (See http://www.sqlite.org/pragma.html#pragma_index_list)
+func (c *Conn) TableIndexes(dbName, table string) ([]Index, error) {
+	var pragma string
+	if len(dbName) == 0 {
+		pragma = fmt.Sprintf(`PRAGMA index_list("%s")`, escapeQuote(table))
+	} else {
+		pragma = fmt.Sprintf(`PRAGMA %s.index_list("%s")`, doubleQuote(dbName), escapeQuote(table))
+	}
+	s, err := c.prepare(pragma)
+	if err != nil {
+		return nil, err
+	}
+	defer s.finalize()
+	var indexes = make([]Index, 0, 5)
+	err = s.execQuery(func(s *Stmt) (err error) {
+		i := Index{}
+		if _, err = s.ScanByIndex(1, &i.Name); err != nil {
+			return
+		}
+		if _, err = s.ScanByIndex(2, &i.Unique); err != nil {
+			return
+		}
+		indexes = append(indexes, i)
+		return
+	})
+	if err != nil {
+		return nil, err
+	}
+	return indexes, nil
+}
+
+// IndexColumns returns one description for each column in the named index.
+// Only Column.Cid and Column.Name are specified. All other fields are unspecified.
+// No error is returned if the index does not exist.
+// (See http://www.sqlite.org/pragma.html#pragma_index_info)
+func (c *Conn) IndexColumns(dbName, index string) ([]Column, error) {
+	var pragma string
+	if len(dbName) == 0 {
+		pragma = fmt.Sprintf(`PRAGMA index_info("%s")`, escapeQuote(index))
+	} else {
+		pragma = fmt.Sprintf(`PRAGMA %s.index_info("%s")`, doubleQuote(dbName), escapeQuote(index))
+	}
+	s, err := c.prepare(pragma)
+	if err != nil {
+		return nil, err
+	}
+	defer s.finalize()
+	var columns = make([]Column, 0, 5)
+	err = s.execQuery(func(s *Stmt) (err error) {
+		c := Column{}
+		if err = s.Scan(nil, &c.Cid, &c.Name); err != nil {
+			return
+		}
+		columns = append(columns, c)
+		return
+	})
+	if err != nil {
+		return nil, err
+	}
+	return columns, nil
+}
diff --git a/vendor/github.com/gwenn/gosqlite/meta_extra.go b/vendor/github.com/gwenn/gosqlite/meta_extra.go
new file mode 100644
index 0000000..c438c1d
--- /dev/null
+++ b/vendor/github.com/gwenn/gosqlite/meta_extra.go
@@ -0,0 +1,76 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build all
+// See SQLITE_ENABLE_COLUMN_METADATA (http://www.sqlite.org/compile.html)
+
+package sqlite
+
+/*
+#include <sqlite3.h>
+#include <stdlib.h>
+*/
+import "C"
+
+import (
+	"fmt"
+	"unsafe"
+)
+
+// Column extracts metadata about a column of a table (doesn't work with views).
+// Column.Cid and Column.DfltValue are left unspecified.
+// (See http://sqlite.org/c3ref/table_column_metadata.html)
+func (c *Conn) Column(dbName, tableName, columnName string) (*Column, error) {
+	var zDbName *C.char
+	if len(dbName) > 0 {
+		zDbName = C.CString(dbName)
+		defer C.free(unsafe.Pointer(zDbName))
+	}
+	zTableName := C.CString(tableName)
+	defer C.free(unsafe.Pointer(zTableName))
+	zColumnName := C.CString(columnName)
+	defer C.free(unsafe.Pointer(zColumnName))
+	var zDataType, zCollSeq *C.char
+	var notNull, primaryKey, autoinc C.int
+	rv := C.sqlite3_table_column_metadata(c.db, zDbName, zTableName, zColumnName, &zDataType, &zCollSeq,
+		&notNull, &primaryKey, &autoinc)
+	if rv != C.SQLITE_OK {
+		return nil, c.error(rv, fmt.Sprintf("Conn.Column(db: %q, tbl: %q, col: %q)", dbName, tableName, columnName))
+	}
+	return &Column{-1, columnName, C.GoString(zDataType), notNull != 0, "", int(primaryKey),
+		autoinc != 0, C.GoString(zCollSeq)}, nil
+}
+
+// ColumnDatabaseName returns the database
+// that is the origin of a particular result column in SELECT statement.
+// The left-most column is column 0.
+// (See http://www.sqlite.org/c3ref/column_database_name.html) +func (s *Stmt) ColumnDatabaseName(index int) string { + if index < 0 || index >= s.ColumnCount() { + panic(fmt.Sprintf("column index %d out of range [0,%d[.", index, s.ColumnCount())) + } + return C.GoString(C.sqlite3_column_database_name(s.stmt, C.int(index))) +} + +// ColumnTableName returns the original un-aliased table name +// that is the origin of a particular result column in SELECT statement. +// The left-most column is column 0. +// (See http://www.sqlite.org/c3ref/column_database_name.html) +func (s *Stmt) ColumnTableName(index int) string { + if index < 0 || index >= s.ColumnCount() { + panic(fmt.Sprintf("column index %d out of range [0,%d[.", index, s.ColumnCount())) + } + return C.GoString(C.sqlite3_column_table_name(s.stmt, C.int(index))) +} + +// ColumnOriginName returns the original un-aliased table column name +// that is the origin of a particular result column in SELECT statement. +// The left-most column is column 0. +// (See http://www.sqlite.org/c3ref/column_database_name.html) +func (s *Stmt) ColumnOriginName(index int) string { + if index < 0 || index >= s.ColumnCount() { + panic(fmt.Sprintf("column index %d out of range [0,%d[.", index, s.ColumnCount())) + } + return C.GoString(C.sqlite3_column_origin_name(s.stmt, C.int(index))) +} diff --git a/vendor/github.com/gwenn/gosqlite/pool.go b/vendor/github.com/gwenn/gosqlite/pool.go new file mode 100644 index 0000000..4045791 --- /dev/null +++ b/vendor/github.com/gwenn/gosqlite/pool.go @@ -0,0 +1,180 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build all + +package sqlite + +import ( + "sync" + "time" +) + +// Pool adapted from https://code.google.com/p/vitess/source/browse/go/pools/roundrobin.go +type Pool struct { + mu sync.Mutex + available *sync.Cond + conns chan *Conn + size int + factory ConnOpen + idleTimeout time.Duration +} + +// ConnOpen is the signature of connection factory. +type ConnOpen func() (*Conn, error) + +// NewPool creates a connection pool. +// factory will be the function used to create connections. +// capacity is the maximum number of connections created. +// If a connection is unused beyond idleTimeout, it's discarded. +func NewPool(factory ConnOpen, capacity int, idleTimeout time.Duration) *Pool { + p := &Pool{conns: make(chan *Conn, capacity), factory: factory, idleTimeout: idleTimeout} + p.available = sync.NewCond(&p.mu) + return p +} + +// Get will return the next available connection. If none is available, and capacity +// has not been reached, it will create a new one using the factory. Otherwise, +// it will indefinitely wait till the next connection becomes available. +func (p *Pool) Get() (*Conn, error) { + return p.get(true) +} + +// TryGet will return the next available connection. If none is available, and capacity +// has not been reached, it will create a new one using the factory. Otherwise, +// it will return nil with no error. +func (p *Pool) TryGet() (*Conn, error) { + return p.get(false) +} + +func (p *Pool) get(wait bool) (*Conn, error) { + p.mu.Lock() + defer p.mu.Unlock() + // Any waits in this loop will release the lock, and it will be + // reacquired before the waits return. + for { + select { + case conn := <-p.conns: + // Found a free resource in the channel + if p.idleTimeout > 0 && conn.timeUsed.Add(p.idleTimeout).Sub(time.Now()) < 0 { + // connection has been idle for too long. 
Discard & go for next.
+				go conn.Close()
+				p.size--
+				// Nobody else should be waiting, but signal anyway.
+				p.available.Signal()
+				continue
+			}
+			return conn, nil
+		default:
+			// connection channel is empty
+			if p.size >= cap(p.conns) {
+				// The pool is full
+				if wait {
+					p.available.Wait()
+					continue
+				}
+				return nil, nil
+			}
+			// Pool is not full. Create a connection.
+			var conn *Conn
+			var err error
+			if conn, err = p.waitForCreate(); err != nil {
+				// size was decremented, and somebody could be waiting.
+				p.available.Signal()
+				return nil, err
+			}
+			// Creation successful. Account for this by incrementing size.
+			p.size++
+			return conn, err
+		}
+	}
+}
+
+func (p *Pool) waitForCreate() (*Conn, error) {
+	// Prevent thundering herd: increment size before creating resource, and decrement after.
+	p.size++
+	p.mu.Unlock()
+	defer func() {
+		p.mu.Lock()
+		p.size--
+	}()
+	return p.factory()
+}
+
+// Release will return a connection to the pool. You MUST return every connection to the pool,
+// even if it's closed. If a connection is closed, Release will discard it.
+func (p *Pool) Release(c *Conn) {
+	p.mu.Lock()
+	defer p.available.Signal()
+	defer p.mu.Unlock()
+
+	if p.size > cap(p.conns) {
+		go c.Close()
+		p.size--
+	} else if c.IsClosed() {
+		p.size--
+	} else {
+		if len(p.conns) == cap(p.conns) {
+			panic("unexpected")
+		}
+		c.timeUsed = time.Now()
+		p.conns <- c
+	}
+}
+
+// Close empties the pool closing all its connections.
+// It waits for all connections to be returned (Release).
+func (p *Pool) Close() {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	for p.size > 0 {
+		select {
+		case conn := <-p.conns:
+			go conn.Close()
+			p.size--
+		default:
+			p.available.Wait()
+		}
+	}
+	p.factory = nil
+}
+
+// IsClosed returns true when the pool has been closed.
+func (p *Pool) IsClosed() bool {
+	return p.factory == nil
+}
+
+// SetCapacity changes the capacity of the pool.
+// You can use it to expand or shrink.
+func (p *Pool) SetCapacity(capacity int) {
+	p.mu.Lock()
+	defer p.available.Broadcast()
+	defer p.mu.Unlock()
+
+	nr := make(chan *Conn, capacity)
+	// This loop transfers connections from the old channel
+	// to the new one, until it fills up or runs out.
+	// It discards extras, if any.
+	for {
+		select {
+		case conn := <-p.conns:
+			if len(nr) < cap(nr) {
+				nr <- conn
+			} else {
+				go conn.Close()
+				p.size--
+			}
+			continue
+		default:
+		}
+		break
+	}
+	p.conns = nr
+}
+
+func (p *Pool) SetIdleTimeout(idleTimeout time.Duration) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	p.idleTimeout = idleTimeout
+}
diff --git a/vendor/github.com/gwenn/gosqlite/pragma.go b/vendor/github.com/gwenn/gosqlite/pragma.go
new file mode 100644
index 0000000..27ead12
--- /dev/null
+++ b/vendor/github.com/gwenn/gosqlite/pragma.go
@@ -0,0 +1,289 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sqlite
+
+/*
+#include <sqlite3.h>
+*/
+import "C"
+
+import (
+	"fmt"
+	"io"
+)
+
+// IntegrityCheck checks database integrity.
+// Database name is optional (default is 'main').
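pool.go above is also gated behind the `all` build tag. A minimal usage sketch, under that assumption and with an invented database path:

```go
package main

import (
	"log"
	"time"

	sqlite "github.com/gwenn/gosqlite"
)

func main() {
	// At most 4 connections; idle ones are discarded after 5 minutes.
	pool := sqlite.NewPool(func() (*sqlite.Conn, error) {
		return sqlite.Open("./app.db") // illustrative path
	}, 4, 5*time.Minute)
	defer pool.Close()

	c, err := pool.Get() // blocks until a connection is free (TryGet does not)
	if err != nil {
		log.Fatal(err)
	}
	defer pool.Release(c) // every connection MUST be returned, even closed ones

	// ... use c ...
}
```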
+// (See http://www.sqlite.org/pragma.html#pragma_integrity_check +// and http://www.sqlite.org/pragma.html#pragma_quick_check) +func (c *Conn) IntegrityCheck(dbName string, max int, quick bool) error { + var prefix string + if quick { + prefix = "quick" + } else { + prefix = "integrity" + } + pragmaName := fmt.Sprintf("%s_check(%d)", prefix, max) + var msg string + err := c.oneValue(pragma(dbName, pragmaName), &msg) + if err != nil { + return err + } + if msg != "ok" { + return c.specificError("integrity check failed on %q (%s)", dbName, msg) + } + return nil +} + +// Encoding returns the text encoding used by the specified database. +// Database name is optional (default is 'main'). +// (See http://sqlite.org/pragma.html#pragma_encoding) +func (c *Conn) Encoding(dbName string) (string, error) { + var encoding string + err := c.oneValue(pragma(dbName, "encoding"), &encoding) + if err != nil { + return "", err + } + return encoding, nil +} + +// SchemaVersion gets the value of the schema-version. +// Database name is optional (default is 'main'). +// (See http://sqlite.org/pragma.html#pragma_schema_version) +func (c *Conn) SchemaVersion(dbName string) (int, error) { + var version int + err := c.oneValue(pragma(dbName, "schema_version"), &version) + if err != nil { + return -1, err + } + return version, nil +} + +// SetRecursiveTriggers sets or clears the recursive trigger capability. +// Database name is optional (default is 'main'). +// (See http://sqlite.org/pragma.html#pragma_recursive_triggers) +func (c *Conn) SetRecursiveTriggers(dbName string, on bool) error { + return c.FastExec(pragma(dbName, fmt.Sprintf("recursive_triggers=%t", on))) +} + +// JournalMode queries the current journaling mode for database. +// Database name is optional (default is 'main'). +// (See http://sqlite.org/pragma.html#pragma_journal_mode) +func (c *Conn) JournalMode(dbName string) (string, error) { + var mode string + err := c.oneValue(pragma(dbName, "journal_mode"), &mode) + if err != nil { + return "", err + } + return mode, nil +} + +// SetJournalMode changes the journaling mode for database. +// Database name is optional (default is 'main'). +// (See http://sqlite.org/pragma.html#pragma_journal_mode) +func (c *Conn) SetJournalMode(dbName, mode string) (string, error) { + var newMode string + err := c.oneValue(pragma(dbName, Mprintf("journal_mode=%Q", mode)), &newMode) + if err != nil { + return "", err + } + return newMode, nil +} + +// LockingMode queries the database connection locking-mode. +// Database name is optional (default is 'main'). +// (See http://sqlite.org/pragma.html#pragma_locking_mode) +func (c *Conn) LockingMode(dbName string) (string, error) { + var mode string + err := c.oneValue(pragma(dbName, "locking_mode"), &mode) + if err != nil { + return "", err + } + return mode, nil +} + +// SetLockingMode changes the database connection locking-mode. +// Database name is optional (default is 'main'). +// (See http://sqlite.org/pragma.html#pragma_locking_mode) +func (c *Conn) SetLockingMode(dbName, mode string) (string, error) { + var newMode string + err := c.oneValue(pragma(dbName, Mprintf("locking_mode=%Q", mode)), &newMode) + if err != nil { + return "", err + } + return newMode, nil +} + +// Synchronous queries the synchronous flag. +// Database name is optional (default is 'main'). 
+// (See http://sqlite.org/pragma.html#pragma_synchronous) +func (c *Conn) Synchronous(dbName string) (int, error) { + var mode int + err := c.oneValue(pragma(dbName, "synchronous"), &mode) + if err != nil { + return -1, err + } + return mode, nil +} + +// SetSynchronous changes the synchronous flag. +// Database name is optional (default is 'main'). +// (See http://sqlite.org/pragma.html#pragma_synchronous) +func (c *Conn) SetSynchronous(dbName string, mode int) error { + return c.FastExec(pragma(dbName, fmt.Sprintf("synchronous=%d", mode))) +} + +// FkViolation is the description of one foreign key constraint violation. +type FkViolation struct { + Table string + RowID int64 + Parent string + FkID int +} + +// ForeignKeyCheck checks the database, or the table, for foreign key constraints that are violated +// and returns one row of output for each violation. +// Database name is optional (default is 'main'). +// Table name is optional (default is all tables). +// (See http://sqlite.org/pragma.html#pragma_foreign_key_check) +func (c *Conn) ForeignKeyCheck(dbName, table string) ([]FkViolation, error) { + var pragma string + if len(dbName) == 0 { + if len(table) == 0 { + pragma = "PRAGMA foreign_key_check" + } else { + pragma = fmt.Sprintf(`PRAGMA foreign_key_check("%s")`, escapeQuote(table)) + } + } else { + if len(table) == 0 { + pragma = fmt.Sprintf("PRAGMA %s.foreign_key_check", doubleQuote(dbName)) + } else { + pragma = fmt.Sprintf(`PRAGMA %s.foreign_key_check("%s")`, doubleQuote(dbName), escapeQuote(table)) + } + } + s, err := c.prepare(pragma) + if err != nil { + return nil, err + } + defer s.finalize() + // table|rowid|parent|fkid + var violations = make([]FkViolation, 0, 20) + err = s.execQuery(func(s *Stmt) (err error) { + v := FkViolation{} + if err = s.Scan(&v.Table, &v.RowID, &v.Parent, &v.FkID); err != nil { + return + } + violations = append(violations, v) + return + }) + if err != nil { + return nil, err + } + return violations, nil +} + +// QueryOnly queries the status of the database. +// Database name is optional (default is 'main'). +// (See http://sqlite.org/pragma.html#pragma_query_only) +func (c *Conn) QueryOnly(dbName string) (bool, error) { + var queryOnly bool + err := c.oneValue(pragma(dbName, "query_only"), &queryOnly) + if err != nil { + return false, err + } + return queryOnly, nil +} + +// SetQueryOnly prevents all changes to database files when enabled. +// Database name is optional (default is 'main'). +// (See http://sqlite.org/pragma.html#pragma_query_only) +func (c *Conn) SetQueryOnly(dbName string, mode bool) error { + return c.FastExec(pragma(dbName, fmt.Sprintf("query_only=%t", mode))) +} + +// ApplicationID queries the "Application ID" integer located into the database header. +// Database name is optional (default is 'main'). +// (See http://sqlite.org/pragma.html#pragma_application_id) +func (c *Conn) ApplicationID(dbName string) (int, error) { + var id int + err := c.oneValue(pragma(dbName, "application_id"), &id) + if err != nil { + return -1, err + } + return id, nil +} + +// SetApplicationID changes the "Application ID". +// Database name is optional (default is 'main'). +// (See http://sqlite.org/pragma.html#pragma_application_id) +func (c *Conn) SetApplicationID(dbName string, id int) error { + return c.FastExec(pragma(dbName, fmt.Sprintf("application_id=%d", id))) +} + +// MMapSize queries the maximum number of bytes that are set aside for memory-mapped I/O. +// Database name is optional (default is 'main'). 
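A sketch tying a few of these pragma wrappers together; the WAL choice and the error-report limit are our own illustrative values, not taken from the diff:

```go
package main

import (
	"fmt"

	sqlite "github.com/gwenn/gosqlite"
)

func checkAndTune(c *sqlite.Conn) error {
	// quick_check, reporting at most 10 problems; "" targets "main".
	if err := c.IntegrityCheck("", 10, true); err != nil {
		return err
	}
	if _, err := c.SetJournalMode("", "wal"); err != nil { // returns the mode actually set
		return err
	}
	violations, err := c.ForeignKeyCheck("", "") // empty table name: check all tables
	if err != nil {
		return err
	}
	fmt.Printf("%d foreign key violations\n", len(violations))
	return nil
}
```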
+// (See http://www.sqlite.org/pragma.html#pragma_mmap_size and http://sqlite.org/mmap.html)
+func (c *Conn) MMapSize(dbName string) (int64, error) {
+	var size int64
+	err := c.oneValue(pragma(dbName, "mmap_size"), &size)
+	if err != nil {
+		return -1, err
+	}
+	return size, nil
+}
+
+// SetMMapSize changes the maximum number of bytes that are set aside for memory-mapped I/O.
+// Database name is optional (default is 'main').
+// If the specified size is zero then memory mapped I/O is disabled.
+// If the specified size is negative, then the limit reverts to the default value.
+// The size of the memory-mapped I/O region cannot be changed while the memory-mapped I/O region is in active use.
+// (See http://www.sqlite.org/pragma.html#pragma_mmap_size and http://sqlite.org/mmap.html)
+func (c *Conn) SetMMapSize(dbName string, size int64) (int64, error) {
+	var newSize int64
+	err := c.oneValue(pragma(dbName, fmt.Sprintf("mmap_size=%d", size)), &newSize)
+	if err != nil {
+		return -1, err
+	}
+	return newSize, nil
+}
+
+func pragma(dbName, pragmaName string) string {
+	if len(dbName) == 0 {
+		return "PRAGMA " + pragmaName
+	}
+	if dbName == "main" || dbName == "temp" {
+		return fmt.Sprintf("PRAGMA %s.%s", dbName, pragmaName)
+	}
+	return fmt.Sprintf("PRAGMA %s.%s", doubleQuote(dbName), pragmaName)
+}
+
+func (c *Conn) oneValue(query string, value interface{}) error { // no cache
+	s, err := c.prepare(query)
+	if err != nil {
+		return err
+	}
+	defer s.finalize()
+	rv := C.sqlite3_step(s.stmt)
+	err = Errno(rv)
+	if err == Row {
+		return s.Scan(value)
+	} else if err == Done {
+		return io.EOF
+	}
+	return s.error(rv, fmt.Sprintf("Conn.oneValue(%q)", query))
+}
+func (s *Stmt) execQuery(rowCallbackHandler func(s *Stmt) error) error { // no check on column count
+	for {
+		if ok, err := s.Next(); err != nil {
+			return err
+		} else if !ok {
+			break
+		}
+		if err := rowCallbackHandler(s); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/gwenn/gosqlite/sqlite.go b/vendor/github.com/gwenn/gosqlite/sqlite.go
new file mode 100644
index 0000000..e5919cb
--- /dev/null
+++ b/vendor/github.com/gwenn/gosqlite/sqlite.go
@@ -0,0 +1,671 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package sqlite provides access to the SQLite library, version 3.
+package sqlite
+
+/*
+#cgo linux freebsd pkg-config: sqlite3
+#cgo !linux,!freebsd LDFLAGS: -lsqlite3
+
+#include <sqlite3.h>
+#include <stdlib.h>
+
+#if SQLITE_VERSION_NUMBER < 3007015
+const char *sqlite3_errstr(int rc) {
+	return "";
+}
+#endif
+*/
+import "C"
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"time"
+	"unsafe"
+)
+
+// OpenError is for detailed report on SQLite open failure.
+type OpenError struct {
+	Code         Errno // thread safe error code
+	ExtendedCode int
+	Msg          string
+	Filename     string
+}
+
+func (e OpenError) Error() string {
+	file := e.Filename
+	if file == "" {
+		file = "(temporary)"
+	}
+	s := fmt.Sprintf("%s: open: ", file)
+	codeErr := e.Code.Error()
+	if len(e.Msg) > 0 {
+		s += e.Msg
+		// error code and msg are often redundant but not always
+		// (see sqlite3ErrorWithMsg usages in SQLite3 sources)
+		if codeErr != e.Msg {
+			s += fmt.Sprintf(" (%s)", codeErr)
+		}
+	} else {
+		s += codeErr
+	}
+	return s
+}
+
+// ConnError is a wrapper for all SQLite connection related errors.
+type ConnError struct {
+	c       *Conn
+	code    Errno  // thread safe error code
+	msg     string // it might be the case that a second error occurs on a separate thread in between the time of the first error and the call to retrieve this message.
+	details string // contextual information, thread safe
+}
+
+// Code returns the original SQLite error code (or -1 for errors generated by the Go wrapper)
+func (e ConnError) Code() Errno {
+	return e.code
+}
+
+// ExtendedCode returns the SQLite extended error code.
+// (See http://www.sqlite.org/c3ref/errcode.html)
+// FIXME it might be the case that a second error occurs on a separate thread in between the time of the first error and the call to this method.
+func (e ConnError) ExtendedCode() int {
+	return int(C.sqlite3_extended_errcode(e.c.db))
+}
+
+// Filename returns the database file name the error comes from.
+func (e ConnError) Filename() string {
+	return e.c.Filename("main")
+}
+
+func (e ConnError) Error() string { // FIXME code.Error() & e.msg are often redundant...
+	if len(e.details) > 0 {
+		return fmt.Sprintf("%s (%s) (%s)", e.msg, e.details, e.code.Error())
+	} else if len(e.msg) > 0 {
+		return fmt.Sprintf("%s (%s)", e.msg, e.code.Error())
+	}
+	return e.code.Error()
+}
+
+// Errno enumerates SQLite result codes
+type Errno int32
+
+func (e Errno) Error() string {
+	var s string
+	if e == ErrSpecific {
+		s = "wrapper specific error"
+	} else {
+		s = C.GoString(C.sqlite3_errstr(C.int(e))) // thread safe
+	}
+	if s == "" {
+		return fmt.Sprintf("errno %d", int(e))
+	}
+	return s
+}
+
+// SQLite result codes
+const (
+	ErrError      = Errno(C.SQLITE_ERROR)      /* SQL error or missing database */
+	ErrInternal   = Errno(C.SQLITE_INTERNAL)   /* Internal logic error in SQLite */
+	ErrPerm       = Errno(C.SQLITE_PERM)       /* Access permission denied */
+	ErrAbort      = Errno(C.SQLITE_ABORT)      /* Callback routine requested an abort */
+	ErrBusy       = Errno(C.SQLITE_BUSY)       /* The database file is locked */
+	ErrLocked     = Errno(C.SQLITE_LOCKED)     /* A table in the database is locked */
+	ErrNoMem      = Errno(C.SQLITE_NOMEM)      /* A malloc() failed */
+	ErrReadOnly   = Errno(C.SQLITE_READONLY)   /* Attempt to write a readonly database */
+	ErrInterrupt  = Errno(C.SQLITE_INTERRUPT)  /* Operation terminated by sqlite3_interrupt()*/
+	ErrIOErr      = Errno(C.SQLITE_IOERR)      /* Some kind of disk I/O error occurred */
+	ErrCorrupt    = Errno(C.SQLITE_CORRUPT)    /* The database disk image is malformed */
+	ErrNotFound   = Errno(C.SQLITE_NOTFOUND)   /* Unknown opcode in sqlite3_file_control() */
+	ErrFull       = Errno(C.SQLITE_FULL)       /* Insertion failed because database is full */
+	ErrCantOpen   = Errno(C.SQLITE_CANTOPEN)   /* Unable to open the database file */
+	ErrProtocol   = Errno(C.SQLITE_PROTOCOL)   /* Database lock protocol error */
+	ErrEmpty      = Errno(C.SQLITE_EMPTY)      /* Database is empty */
+	ErrSchema     = Errno(C.SQLITE_SCHEMA)     /* The database schema changed */
+	ErrTooBig     = Errno(C.SQLITE_TOOBIG)     /* String or BLOB exceeds size limit */
+	ErrConstraint = Errno(C.SQLITE_CONSTRAINT) /* Abort due to constraint violation */
+	ErrMismatch   = Errno(C.SQLITE_MISMATCH)   /* Data type mismatch */
+	ErrMisuse     = Errno(C.SQLITE_MISUSE)     /* Library used incorrectly */
+	ErrNolfs      = Errno(C.SQLITE_NOLFS)      /* Uses OS features not supported on host */
+	ErrAuth       = Errno(C.SQLITE_AUTH)       /* Authorization denied */
+	ErrFormat     = Errno(C.SQLITE_FORMAT)     /* Auxiliary database format error */
+	ErrRange      = Errno(C.SQLITE_RANGE)      /* 2nd parameter to sqlite3_bind out of range */
+	ErrNotDB      = Errno(C.SQLITE_NOTADB)     /* File opened that is not a database
file */ + //Notice = Errno(C.SQLITE_NOTICE) /* Notifications from sqlite3_log() */ + //Warning = Errno(C.SQLITE_WARNING) /* Warnings from sqlite3_log() */ + + Row = Errno(C.SQLITE_ROW) /* sqlite3_step() has another row ready */ + Done = Errno(C.SQLITE_DONE) /* sqlite3_step() has finished executing */ + ErrSpecific = Errno(-1) /* Wrapper specific error */ +) + +func (c *Conn) error(rv C.int, details ...string) error { + if c == nil { + return errors.New("nil sqlite database") + } + if rv == C.SQLITE_OK { + return nil + } + err := ConnError{c: c, code: Errno(rv), msg: C.GoString(C.sqlite3_errmsg(c.db))} + if len(details) > 0 { + err.details = details[0] + } + return err +} + +func (c *Conn) specificError(msg string, a ...interface{}) error { + return ConnError{c: c, code: ErrSpecific, msg: fmt.Sprintf(msg, a...)} +} + +// LastError returns the error for the most recent failed sqlite3_* API call associated with a database connection. +// (See http://sqlite.org/c3ref/errcode.html) +// FIXME it might be the case that a second error occurs on a separate thread in between the time of the first error and the call to this method. +func (c *Conn) LastError() error { + if c == nil { + return errors.New("nil sqlite database") + } + errorCode := C.sqlite3_errcode(c.db) + if errorCode == C.SQLITE_OK { + return nil + } + return ConnError{c: c, code: Errno(errorCode), msg: C.GoString(C.sqlite3_errmsg(c.db))} +} + +// Conn represents a database connection handle. +// (See http://sqlite.org/c3ref/sqlite3.html) +type Conn struct { + db *C.sqlite3 + stmtCache *cache + authorizer *sqliteAuthorizer + busyHandler *sqliteBusyHandler + profile *sqliteProfile + progressHandler *sqliteProgressHandler + trace *sqliteTrace + commitHook *sqliteCommitHook + rollbackHook *sqliteRollbackHook + updateHook *sqliteUpdateHook + udfs map[string]*sqliteFunction + modules map[string]*sqliteModule + timeUsed time.Time + nTransaction uint8 + // DefaultTimeLayout specifies the layout used to persist time ("2006-01-02 15:04:05.000Z07:00" by default). + // When set to "", time is persisted as integer (unix time). + // Using type alias implementing the Scanner/Valuer interfaces is suggested... + DefaultTimeLayout string + // ScanNumericalAsTime tells the driver to try to parse column with NUMERIC affinity as time.Time (using the DefaultTimeLayout) + ScanNumericalAsTime bool +} + +// Version returns the run-time library version number +// (See http://sqlite.org/c3ref/libversion.html) +func Version() string { + p := C.sqlite3_libversion() + return C.GoString(p) +} + +// VersionNumber returns the run-time library version number as 300X00Y +// (See http://sqlite.org/c3ref/libversion.html) +func VersionNumber() int32 { + return int32(C.sqlite3_libversion_number()) +} + +// OpenFlag enumerates flags for file open operations +type OpenFlag int32 + +// Flags for file open operations +const ( + OpenReadOnly OpenFlag = C.SQLITE_OPEN_READONLY + OpenReadWrite OpenFlag = C.SQLITE_OPEN_READWRITE + OpenCreate OpenFlag = C.SQLITE_OPEN_CREATE + OpenURI OpenFlag = C.SQLITE_OPEN_URI + OpenNoMutex OpenFlag = C.SQLITE_OPEN_NOMUTEX + OpenFullMutex OpenFlag = C.SQLITE_OPEN_FULLMUTEX + OpenSharedCache OpenFlag = C.SQLITE_OPEN_SHAREDCACHE + OpenPrivateCache OpenFlag = C.SQLITE_OPEN_PRIVATECACHE +) + +// Open opens a new database connection. 
+// ":memory:" for memory db, +// "" for temp file db +// +// (See sqlite3_open_v2: http://sqlite.org/c3ref/open.html) +func Open(filename string, flags ...OpenFlag) (*Conn, error) { + return OpenVfs(filename, "", flags...) +} + +// OpenVfs opens a new database with a specified virtual file system. +func OpenVfs(filename string, vfsname string, flags ...OpenFlag) (*Conn, error) { + if C.sqlite3_threadsafe() == 0 { + return nil, errors.New("sqlite library was not compiled for thread-safe operation") + } + var openFlags int + if len(flags) > 0 { + for _, flag := range flags { + openFlags |= int(flag) + } + } else { + openFlags = C.SQLITE_OPEN_FULLMUTEX | C.SQLITE_OPEN_READWRITE | C.SQLITE_OPEN_CREATE + } + + var db *C.sqlite3 + cname := C.CString(filename) + defer C.free(unsafe.Pointer(cname)) + var vfs *C.char + if len(vfsname) > 0 { + vfs = C.CString(vfsname) + defer C.free(unsafe.Pointer(vfs)) + } + rv := C.sqlite3_open_v2(cname, &db, C.int(openFlags), vfs) + if rv != C.SQLITE_OK { + err := OpenError{ + Code: Errno(rv), + Filename: filename, + } + if db != nil { // try to extract further details from db... + err.ExtendedCode = int(C.sqlite3_extended_errcode(db)) + err.Msg = C.GoString(C.sqlite3_errmsg(db)) + C.sqlite3_close(db) + return nil, err + } + return nil, err + } + if db == nil { + return nil, errors.New("sqlite succeeded without returning a database") + } + c := &Conn{db: db, stmtCache: newCache(), DefaultTimeLayout: "2006-01-02 15:04:05.000Z07:00"} + if os.Getenv("SQLITE_DEBUG") != "" { + //c.SetAuthorizer(authorizer, c.db) + c.Trace(trace, "TRACE") + //c.SetCacheSize(0) + } + + return c, nil +} + +/* +func authorizer(d interface{}, action Action, arg1, arg2, dbName, triggerName string) Auth { + fmt.Fprintf(os.Stderr, "%p: %v, %s, %s, %s, %s\n", d, action, arg1, arg2, dbName, triggerName) + return AuthOk +} +*/ +func trace(d interface{}, sql string) { + fmt.Fprintf(os.Stderr, "%s: %s\n", d, sql) +} + +// BusyTimeout sets a busy timeout and clears any previously set handler. +// If duration is zero or negative, turns off busy handler. +// (See http://sqlite.org/c3ref/busy_timeout.html) +func (c *Conn) BusyTimeout(d time.Duration) error { + c.busyHandler = nil + return c.error(C.sqlite3_busy_timeout(c.db, C.int(d/time.Millisecond)), "Conn.BusyTimeout") +} + +// Readonly determines if a database is read-only. +// (See http://sqlite.org/c3ref/db_readonly.html) +func (c *Conn) Readonly(dbName string) (bool, error) { + cname := C.CString(dbName) + rv := C.sqlite3_db_readonly(c.db, cname) + C.free(unsafe.Pointer(cname)) + if rv == -1 { + return false, c.specificError("%q is not the name of a database", dbName) + } + return rv == 1, nil +} + +// Filename returns the filename for a database connection. +// (See http://sqlite.org/c3ref/db_filename.html) +func (c *Conn) Filename(dbName string) string { + cname := C.CString(dbName) + defer C.free(unsafe.Pointer(cname)) + return C.GoString(C.sqlite3_db_filename(c.db, cname)) +} + +// Exec prepares and executes one or many parameterized statement(s) (separated by semi-colon). +// Don't use it with SELECT or anything that returns data. 
+func (c *Conn) Exec(cmd string, args ...interface{}) error { + for len(cmd) > 0 { + s, err := c.prepare(cmd) + if err != nil { + return err + } else if s.stmt == nil { + // this happens for a comment or white-space + cmd = s.tail + continue + } + var subargs []interface{} + count := s.BindParameterCount() + if len(s.tail) > 0 && len(args) >= count { + subargs = args[:count] + args = args[count:] + } else { + subargs = args + } + err = s.Exec(subargs...) + if err != nil { + s.finalize() + return err + } + if err = s.finalize(); err != nil { + return err + } + cmd = s.tail + } + return nil +} + +// ExecDml helps executing DML statement: +// (1) it binds the specified args, +// (2) it executes the statement, +// (3) it returns the number of rows that were changed or inserted or deleted. +func (c *Conn) ExecDml(cmd string, args ...interface{}) (changes int, err error) { + s, err := c.Prepare(cmd) + if err != nil { + return -1, err + } + defer s.Finalize() + return s.ExecDml(args...) +} + +// Insert is like ExecDml but returns the autoincremented rowid. +func (c *Conn) Insert(cmd string, args ...interface{}) (rowid int64, err error) { + n, err := c.ExecDml(cmd, args...) + if err != nil { + return -1, err + } + if n == 0 { // No change => no insert... + return -1, nil + } + return c.LastInsertRowid(), nil +} + +// Select helps executing SELECT statement: +// (1) it binds the specified args, +// (2) it steps on the rows returned, +// (3) it delegates scanning to a callback function. +// The callback function is invoked for each result row coming out of the statement. +func (c *Conn) Select(query string, rowCallbackHandler func(s *Stmt) error, args ...interface{}) error { + s, err := c.Prepare(query) + if err != nil { + return err + } + defer s.Finalize() + return s.Select(rowCallbackHandler, args...) +} + +// SelectByID helps executing SELECT statement that is expected to return only one row. +// Args are for scanning (not binding). +// Returns false if there is no matching row. +// No check is done to ensure that no more than one row is returned by the statement. +func (c *Conn) SelectByID(query string, id interface{}, args ...interface{}) (found bool, err error) { + s, err := c.Prepare(query, id) + if err != nil { + return false, err + } + defer s.Finalize() + return s.SelectOneRow(args...) +} + +// Exists returns true if the specified query returns at least one row. +func (c *Conn) Exists(query string, args ...interface{}) (bool, error) { + s, err := c.Prepare(query, args...) + if err != nil { + return false, err + } + defer s.Finalize() + ok, err := s.Next() + if err != nil { + return false, err + } + if s.ColumnCount() == 0 { + return false, s.specificError("don't use Exists with query that returns no data such as %q", query) + } + return ok, nil +} + +// OneValue is used with SELECT that returns only one row with only one column. +// Returns io.EOF when there is no row. +// No check is performed to ensure that there is no more than one row. +func (c *Conn) OneValue(query string, value interface{}, args ...interface{}) error { + s, err := c.Prepare(query, args...) 
+ if err != nil { + return err + } + defer s.Finalize() + b, err := s.Next() + if err != nil { + return err + } else if !b { + if s.ColumnCount() == 0 { + return s.specificError("don't use OneValue with query that returns no data such as %q", query) + } + return io.EOF + } + return s.Scan(value) +} + +// Changes returns the number of database rows that were changed or inserted or deleted by the most recently completed SQL statement on the database connection. +// If a separate thread makes changes on the same database connection while Changes() is running then the value returned is unpredictable and not meaningful. +// (See http://sqlite.org/c3ref/changes.html) +func (c *Conn) Changes() int { + return int(C.sqlite3_changes(c.db)) +} + +// TotalChanges returns the number of row changes caused by INSERT, UPDATE or DELETE statements since the database connection was opened. +// (See http://sqlite.org/c3ref/total_changes.html) +func (c *Conn) TotalChanges() int { + return int(C.sqlite3_total_changes(c.db)) +} + +// LastInsertRowid returns the rowid of the most recent successful INSERT into the database. +// If a separate thread performs a new INSERT on the same database connection while the LastInsertRowid() function is running and thus changes the last insert rowid, then the value returned by LastInsertRowid() is unpredictable and might not equal either the old or the new last insert rowid. +// (See http://sqlite.org/c3ref/last_insert_rowid.html) +func (c *Conn) LastInsertRowid() int64 { + return int64(C.sqlite3_last_insert_rowid(c.db)) +} + +// Interrupt interrupts a long-running query. +// (See http://sqlite.org/c3ref/interrupt.html) +func (c *Conn) Interrupt() { + C.sqlite3_interrupt(c.db) +} + +// GetAutocommit tests for auto-commit mode. +// (See http://sqlite.org/c3ref/get_autocommit.html) +func (c *Conn) GetAutocommit() bool { + return C.sqlite3_get_autocommit(c.db) != 0 +} + +// TransactionType enumerates the different transaction behaviors +// See Conn.BeginTransaction +type TransactionType uint8 + +// Transaction types +const ( + Deferred TransactionType = 0 + Immediate TransactionType = 1 + Exclusive TransactionType = 2 +) + +// Begin begins a transaction in deferred mode. +// (See http://www.sqlite.org/lang_transaction.html) +func (c *Conn) Begin() error { + return c.BeginTransaction(Deferred) +} + +// BeginTransaction begins a transaction of the specified type. +// (See http://www.sqlite.org/lang_transaction.html) +func (c *Conn) BeginTransaction(t TransactionType) error { + if t == Deferred { + return c.FastExec("BEGIN") + } else if t == Immediate { + return c.FastExec("BEGIN IMMEDIATE") + } else if t == Exclusive { + return c.FastExec("BEGIN EXCLUSIVE") + } + panic(fmt.Sprintf("Unsupported transaction type: '%#v'", t)) +} + +// Commit commits transaction. +// It is strongly discouraged to defer Commit without checking the error returned. +func (c *Conn) Commit() error { + // Although there are situations when it is possible to recover and continue a transaction, + // it is considered a best practice to always issue a ROLLBACK if an error is encountered. + // In situations when SQLite was already forced to roll back the transaction and has returned to autocommit mode, + // the ROLLBACK will do nothing but return an error that can be safely ignored. 
+	err := c.FastExec("COMMIT")
+	if err != nil && !c.GetAutocommit() {
+		c.Rollback()
+	}
+	return err
+}
+
+// Rollback rolls back the transaction.
+func (c *Conn) Rollback() error {
+	return c.FastExec("ROLLBACK")
+}
+
+// Transaction is used to execute a function inside an SQLite database transaction.
+// The transaction is committed when the function completes (with no error),
+// or it rolls back if the function fails.
+// If the transaction occurs within another transaction (only one that is started using this method) a Savepoint is created.
+// Two errors may be returned: the first is the one returned by the f function,
+// the second is the one returned by begin/commit/rollback.
+// (See http://sqlite.org/tclsqlite.html#transaction)
+func (c *Conn) Transaction(t TransactionType, f func(c *Conn) error) error {
+	var err error
+	if c.nTransaction == 0 {
+		err = c.BeginTransaction(t)
+	} else {
+		err = c.Savepoint(strconv.Itoa(int(c.nTransaction)))
+	}
+	if err != nil {
+		return err
+	}
+	c.nTransaction++
+	defer func() {
+		c.nTransaction--
+		if err != nil {
+			_, ko := err.(*ConnError)
+			if c.nTransaction == 0 || ko {
+				c.Rollback()
+			} else {
+				if rerr := c.RollbackSavepoint(strconv.Itoa(int(c.nTransaction))); rerr != nil {
+					Log(-1, rerr.Error())
+				} else if rerr := c.ReleaseSavepoint(strconv.Itoa(int(c.nTransaction))); rerr != nil {
+					Log(-1, rerr.Error())
+				}
+			}
+		} else {
+			if c.nTransaction == 0 {
+				err = c.Commit()
+			} else {
+				err = c.ReleaseSavepoint(strconv.Itoa(int(c.nTransaction)))
+			}
+			if err != nil {
+				c.Rollback()
+			}
+		}
+	}()
+	err = f(c)
+	return err
+}
+
+// Savepoint starts a new transaction with a name.
+// (See http://sqlite.org/lang_savepoint.html)
+func (c *Conn) Savepoint(name string) error {
+	return c.FastExec(Mprintf("SAVEPOINT %Q", name))
+}
+
+// ReleaseSavepoint causes all savepoints back to and including the most recent savepoint with a matching name to be removed from the transaction stack.
+// (See http://sqlite.org/lang_savepoint.html)
+func (c *Conn) ReleaseSavepoint(name string) error {
+	return c.FastExec(Mprintf("RELEASE %Q", name))
+}
+
+// RollbackSavepoint reverts the state of the database back to what it was just before the corresponding SAVEPOINT.
+// (See http://sqlite.org/lang_savepoint.html)
+func (c *Conn) RollbackSavepoint(name string) error {
+	return c.FastExec(Mprintf("ROLLBACK TO SAVEPOINT %Q", name))
+}
+
+/*
+func (c *Conn) exec(cmd string) error {
+	s, err := c.prepare(cmd)
+	if err != nil {
+		return err
+	}
+	defer s.finalize()
+	rv := C.sqlite3_step(s.stmt)
+	if Errno(rv) != Done { // this check cannot be done with sqlite3_exec
+		return s.error(rv, "Conn.exec(%q)", cmd)
+	}
+	return nil
+}
+*/
+
+// FastExec executes one or many non-parameterized statement(s) (separated by semi-colon) with no control and no stmt cache.
+func (c *Conn) FastExec(sql string) error {
+	sqlstr := C.CString(sql)
+	err := c.error(C.sqlite3_exec(c.db, sqlstr, nil, nil, nil))
+	C.free(unsafe.Pointer(sqlstr))
+	return err
+}
+
+// Close closes a database connection and any dangling statements.
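Conn.Transaction above auto-nests via savepoints; a hedged sketch of the intended call pattern follows (the account table and amounts are illustrative, not from the diff):

```go
package main

import (
	sqlite "github.com/gwenn/gosqlite"
)

// transfer commits both updates or neither; a nested Transaction call
// inside f would become a savepoint rather than a second BEGIN.
func transfer(c *sqlite.Conn, from, to int64, amount float64) error {
	return c.Transaction(sqlite.Immediate, func(c *sqlite.Conn) error {
		if _, err := c.ExecDml("UPDATE account SET balance = balance - ? WHERE id = ?", amount, from); err != nil {
			return err // a non-nil result rolls the transaction back
		}
		_, err := c.ExecDml("UPDATE account SET balance = balance + ? WHERE id = ?", amount, to)
		return err // nil commits (or releases the savepoint)
	})
}
```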
+// (See http://sqlite.org/c3ref/close.html)
+func (c *Conn) Close() error {
+	if c == nil {
+		return errors.New("nil sqlite database")
+	}
+	if c.db == nil {
+		return nil
+	}
+
+	c.stmtCache.flush()
+
+	rv := C.sqlite3_close(c.db)
+
+	if rv&0xFF == C.SQLITE_BUSY {
+		// Dangling statements
+		stmt := C.sqlite3_next_stmt(c.db, nil)
+		for stmt != nil {
+			if C.sqlite3_stmt_busy(stmt) != 0 {
+				Log(C.SQLITE_MISUSE, "Dangling statement (not reset): \""+C.GoString(C.sqlite3_sql(stmt))+"\"")
+			} else {
+				Log(C.SQLITE_MISUSE, "Dangling statement (not finalized): \""+C.GoString(C.sqlite3_sql(stmt))+"\"")
+			}
+			C.sqlite3_finalize(stmt)
+			stmt = C.sqlite3_next_stmt(c.db, nil)
+		}
+		rv = C.sqlite3_close(c.db)
+	}
+
+	if rv != C.SQLITE_OK {
+		Log(int32(rv), "error while closing Conn")
+		return c.error(rv, "Conn.Close")
+	}
+	c.db = nil
+	return nil
+}
+
+// IsBusy tells if at least one statement has not been reset/finalized.
+func (c *Conn) IsBusy() bool {
+	stmt := C.sqlite3_next_stmt(c.db, nil)
+	for stmt != nil {
+		if C.sqlite3_stmt_busy(stmt) != 0 {
+			return true
+		}
+		stmt = C.sqlite3_next_stmt(c.db, stmt)
+	}
+	return false
+}
+
+// IsClosed tells if the database connection has been closed.
+func (c *Conn) IsClosed() bool {
+	return c == nil || c.db == nil
+}
diff --git a/vendor/github.com/gwenn/gosqlite/stmt.go b/vendor/github.com/gwenn/gosqlite/stmt.go
new file mode 100644
index 0000000..120055e
--- /dev/null
+++ b/vendor/github.com/gwenn/gosqlite/stmt.go
@@ -0,0 +1,1185 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sqlite
+
+/*
+#include <sqlite3.h>
+#include <stdlib.h>
+#include <string.h>
+
+// These wrappers are necessary because SQLITE_TRANSIENT
+// is a pointer constant, and cgo doesn't translate them correctly.
+// The definition in sqlite3.h is:
+//
+// typedef void (*sqlite3_destructor_type)(void*);
+// #define SQLITE_STATIC ((sqlite3_destructor_type)0)
+// #define SQLITE_TRANSIENT ((sqlite3_destructor_type)-1)
+
+static inline int my_bind_text(sqlite3_stmt *stmt, int pidx, const char *data, int data_len) {
+	return sqlite3_bind_text(stmt, pidx, data, data_len, free);
+}
+static inline int my_bind_empty_text(sqlite3_stmt *stmt, int pidx) {
+	return sqlite3_bind_text(stmt, pidx, "", 0, SQLITE_STATIC);
+}
+static inline int my_bind_blob(sqlite3_stmt *stmt, int pidx, void *data, int data_len) {
+	return sqlite3_bind_blob(stmt, pidx, data, data_len, SQLITE_TRANSIENT);
+}
+*/
+import "C"
+
+import (
+	"database/sql"
+	"database/sql/driver"
+	"errors"
+	"fmt"
+	"math"
+	"reflect"
+	"strings"
+	"time"
+	"unsafe"
+)
+
+const (
+	i64 = unsafe.Sizeof(int(0)) > 4
+)
+
+// StmtError is a wrapper for all SQLite statement related errors.
+type StmtError struct {
+	ConnError
+	s *Stmt
+}
+
+// SQL returns the SQL associated with the prepared statement in error.
+func (e StmtError) SQL() string { + return e.s.SQL() +} + +func (s *Stmt) error(rv C.int, details ...string) error { + if s == nil { + return errors.New("nil sqlite statement") + } + if rv == C.SQLITE_OK { + return nil + } + err := ConnError{c: s.c, code: Errno(rv), msg: C.GoString(C.sqlite3_errmsg(s.c.db))} + if len(details) > 0 { + err.details = details[0] + } + return StmtError{err, s} +} + +func (s *Stmt) specificError(msg string, a ...interface{}) error { + return StmtError{ConnError{c: s.c, code: ErrSpecific, msg: fmt.Sprintf(msg, a...)}, s} +} + +// CheckTypeMismatch enables type check in Scan methods (default true) +var CheckTypeMismatch = true + +// Stmt represents a single SQL statement. +// (See http://sqlite.org/c3ref/stmt.html) +type Stmt struct { + c *Conn + stmt *C.sqlite3_stmt + sql string + tail string + columnCount int + cols map[string]int // cached columns index by name + bindParameterCount int + params map[string]int // cached parameter index by name + affinities []Affinity // cached columns type affinity + // Tell if the stmt should be cached (default true) + Cacheable bool +} + +func (c *Conn) prepare(sql string, args ...interface{}) (*Stmt, error) { + if c == nil { + return nil, errors.New("nil sqlite database") + } + sqlstr := C.CString(sql) + defer C.free(unsafe.Pointer(sqlstr)) + var stmt *C.sqlite3_stmt + var tail *C.char + rv := C.sqlite3_prepare_v2(c.db, sqlstr, -1, &stmt, &tail) + if rv != C.SQLITE_OK { + // C.sqlite3_finalize(stmt) // If there is an error, *stmt is set to NULL + return nil, c.error(rv, sql) + } + var t string + if tail != nil && *tail != '\000' { + t = C.GoString(tail) + } + s := &Stmt{c: c, stmt: stmt, tail: strings.TrimSpace(t), columnCount: -1, bindParameterCount: -1} + if len(args) > 0 { + err := s.Bind(args...) + if err != nil { + s.finalize() + return nil, err + } + } + return s, nil +} + +// Prepare first looks in the statement cache or compiles the SQL statement. +// And optionally bind values. +// (See sqlite3_prepare_v2: http://sqlite.org/c3ref/prepare.html) +func (c *Conn) Prepare(sql string, args ...interface{}) (*Stmt, error) { + s := c.stmtCache.find(sql) + if s != nil { + if len(args) > 0 { + err := s.Bind(args...) + if err != nil { + s.finalize() // don't put it back in the cache + return nil, err + } + } + return s, nil + } + s, err := c.prepare(sql, args...) + if s != nil && s.stmt != nil { + s.Cacheable = true + } + return s, err +} + +// Exec is a one-step statement execution. +// Don't use it with SELECT or anything that returns data. +// The Stmt is reset at each call. +// (See http://sqlite.org/c3ref/bind_blob.html, http://sqlite.org/c3ref/step.html) +func (s *Stmt) Exec(args ...interface{}) error { + err := s.Bind(args...) + if err != nil { + return err + } + return s.exec() +} +func (s *Stmt) exec() error { + rv := C.sqlite3_step(s.stmt) + C.sqlite3_reset(s.stmt) + err := Errno(rv) + if err != Done { + if err == Row { + return s.specificError("don't use exec with anything that returns data such as %q", s.SQL()) + } + return s.error(rv, "Stmt.exec") + } + if s.ColumnCount() > 0 { + return s.specificError("don't use exec with anything that returns data such as %q", s.SQL()) + } + return nil +} + +// ExecDml is like Exec but returns the number of rows that were changed or inserted or deleted. +// Don't use it with SELECT or anything that returns data. +// The Stmt is reset at each call. +func (s *Stmt) ExecDml(args ...interface{}) (changes int, err error) { + err = s.Exec(args...) 
+ if err != nil { + return -1, err + } + return s.c.Changes(), nil +} + +// Insert is like ExecDml but returns the autoincremented rowid. +// Don't use it with SELECT or anything that returns data. +// The Stmt is reset at each call. +func (s *Stmt) Insert(args ...interface{}) (rowid int64, err error) { + n, err := s.ExecDml(args...) + if err != nil { + return -1, err + } + if n == 0 { // No change => no insert... + return -1, nil + } + return s.c.LastInsertRowid(), nil +} + +// Select helps executing SELECT statement: +// (1) it binds the specified args, +// (2) it steps on the rows returned, +// (3) it delegates scanning to a callback function. +// The callback function is invoked for each result row coming out of the statement. +// +// s, err := db.Prepare(...) +// // TODO error handling +// defer s.Finalize() +// err = s.Select(func(s *Stmt) error { +// //Scan +// }) +// // TODO error handling +func (s *Stmt) Select(rowCallbackHandler func(s *Stmt) error, args ...interface{}) error { + if len(args) > 0 { + err := s.Bind(args...) + if err != nil { + return err + } + } + if s.ColumnCount() == 0 { + return s.specificError("don't use Select with query that returns no data such as %q", s.SQL()) + } + for { + if ok, err := s.Next(); err != nil { + return err + } else if !ok { + break + } + if err := rowCallbackHandler(s); err != nil { + return err + } + } + return nil +} + +// SelectOneRow helps executing SELECT statement that is expected to return only one row. +// Args are for scanning (not binding). +// Returns false if there is no matching row. +// No check is done to ensure that no more than one row is returned by the statement. +// TODO Create a SelectUniqueRow that checks that the row is unique. +func (s *Stmt) SelectOneRow(args ...interface{}) (found bool, err error) { + if found, err = s.Next(); err != nil { + return false, err + } else if !found { + if s.ColumnCount() == 0 { + return false, s.specificError("don't use SelectOneRow with query that returns no data such as %q", s.SQL()) + } + return false, nil + } + return true, s.Scan(args...) +} + +// BindParameterCount returns the number of SQL parameters. +// FIXME If parameters of the ?NNN form are used, there may be gaps in the list. +// (See http://sqlite.org/c3ref/bind_parameter_count.html) +func (s *Stmt) BindParameterCount() int { + if s.bindParameterCount == -1 { + s.bindParameterCount = int(C.sqlite3_bind_parameter_count(s.stmt)) + } + return s.bindParameterCount +} + +// BindParameterIndex returns the index of a parameter with a given name (cached). +// The first host parameter has an index of 1, not 0. +// (See http://sqlite.org/c3ref/bind_parameter_index.html) +func (s *Stmt) BindParameterIndex(name string) (int, error) { + if s.params == nil { + count := s.BindParameterCount() + s.params = make(map[string]int, count) + } + index, ok := s.params[name] + if ok { + return index, nil + } + cname := C.CString(name) + index = int(C.sqlite3_bind_parameter_index(s.stmt, cname)) + C.free(unsafe.Pointer(cname)) + if index == 0 { + return index, s.specificError("invalid parameter name: %q", name) + } + s.params[name] = index + return index, nil +} + +// BindParameterName returns the name of a wildcard parameter (not cached). +// Returns "" if the index is out of range or if the wildcard is unnamed. +// The first host parameter has an index of 1, not 0. 
+// (See http://sqlite.org/c3ref/bind_parameter_name.html) +func (s *Stmt) BindParameterName(index int) (string, error) { + name := C.sqlite3_bind_parameter_name(s.stmt, C.int(index)) + if name == nil { + return "", s.specificError("invalid parameter index: %d", index) + } + return C.GoString(name), nil +} + +// NamedBind binds parameters by their name (name1, value1, ...) +func (s *Stmt) NamedBind(args ...interface{}) error { + if len(args)%2 != 0 { + return s.specificError("expected an even number of arguments: %d", len(args)) + } + for i := 0; i < len(args); i += 2 { + name, ok := args[i].(string) + if !ok { + return s.specificError("non-string param name at %d: %T", i, args[i]) + } + index, err := s.BindParameterIndex(name) // How to look up only once for one statement ? + if err != nil { + return err + } + err = s.BindByIndex(index, args[i+1]) + if err != nil { + return err + } + } + return nil +} + +// Bind binds parameters by their index. +// Calls sqlite3_bind_parameter_count and sqlite3_bind_(blob|double|int|int64|null|text) depending on args type/kind. +// (See http://sqlite.org/c3ref/bind_blob.html) +func (s *Stmt) Bind(args ...interface{}) error { + n := s.BindParameterCount() + if n != len(args) { + return s.specificError("incorrect argument count for Stmt.Bind: have %d want %d", len(args), n) + } + + for i, v := range args { + err := s.BindByIndex(i+1, v) + if err != nil { + return err + } + } + return nil +} + +// NullIfEmptyString transforms empty string to null when true (true by default) +var NullIfEmptyString = true + +// NullIfZeroTime transforms zero time (time.Time.IsZero) to null when true (true by default) +var NullIfZeroTime = true + +// BindByIndex binds value to the specified host parameter of the prepared statement. +// Value's type/kind is used to find the storage class. +// The leftmost SQL parameter has an index of 1. 
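A sketch of the named-parameter path (NamedScan appears further down in this file); the SQL, table, and parameter names are invented:

```go
package main

import (
	"fmt"

	sqlite "github.com/gwenn/gosqlite"
)

func listBig(c *sqlite.Conn) error {
	s, err := c.Prepare("SELECT n, s FROM test WHERE n > :min")
	if err != nil {
		return err
	}
	defer s.Finalize()
	if err = s.NamedBind(":min", 10); err != nil { // names include the ':' prefix
		return err
	}
	// Select without args reuses the bindings set above.
	return s.Select(func(s *sqlite.Stmt) error {
		var n int
		var str string
		if err := s.NamedScan("n", &n, "s", &str); err != nil {
			return err
		}
		fmt.Println(n, str)
		return nil
	})
}
```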
+func (s *Stmt) BindByIndex(index int, value interface{}) error { + i := C.int(index) + var rv C.int + switch value := value.(type) { + case nil: + rv = C.sqlite3_bind_null(s.stmt, i) + case string: + if len(value) == 0 { + if NullIfEmptyString { + rv = C.sqlite3_bind_null(s.stmt, i) + } else { + rv = C.my_bind_empty_text(s.stmt, i) + } + } else { + if i64 && len(value) > math.MaxInt32 { + return s.specificError("string too big: %d at index %d", len(value), index) + } + rv = C.my_bind_text(s.stmt, i, C.CString(value), C.int(len(value))) + } + case int: + if i64 { + rv = C.sqlite3_bind_int64(s.stmt, i, C.sqlite3_int64(value)) + } else { + rv = C.sqlite3_bind_int(s.stmt, i, C.int(value)) + } + case int32: + rv = C.sqlite3_bind_int(s.stmt, i, C.int(value)) + case int64: + rv = C.sqlite3_bind_int64(s.stmt, i, C.sqlite3_int64(value)) + case byte: + rv = C.sqlite3_bind_int(s.stmt, i, C.int(value)) + case bool: + rv = C.sqlite3_bind_int(s.stmt, i, btocint(value)) + case float32: + rv = C.sqlite3_bind_double(s.stmt, i, C.double(value)) + case float64: + rv = C.sqlite3_bind_double(s.stmt, i, C.double(value)) + case []byte: + if i64 && len(value) > math.MaxInt32 { + return s.specificError("blob too big: %d at index %d", len(value), index) + } + if len(value) == 0 { + rv = C.sqlite3_bind_zeroblob(s.stmt, i, 0) + } else { + rv = C.my_bind_blob(s.stmt, i, unsafe.Pointer(&value[0]), C.int(len(value))) + } + case time.Time: + if NullIfZeroTime && value.IsZero() { + rv = C.sqlite3_bind_null(s.stmt, i) + } else if s.c.DefaultTimeLayout == "" { + rv = C.sqlite3_bind_int64(s.stmt, i, C.sqlite3_int64(value.Unix())) + } else { + v := value.Format(s.c.DefaultTimeLayout) + rv = C.my_bind_text(s.stmt, i, C.CString(v), C.int(len(v))) + } + case ZeroBlobLength: + rv = C.sqlite3_bind_zeroblob(s.stmt, i, C.int(value)) + case driver.Valuer: + v, err := value.Value() + if err != nil { + return err + } + return s.BindByIndex(index, v) + default: + return s.BindReflect(index, value) + } + return s.error(rv, "Stmt.Bind") +} + +// BindReflect binds value to the specified host parameter of the prepared statement. +// Value's (reflect) Kind is used to find the storage class. +// The leftmost SQL parameter has an index of 1. 
+func (s *Stmt) BindReflect(index int, value interface{}) error { + i := C.int(index) + var rv C.int + v := reflect.ValueOf(value) + switch v.Kind() { + case reflect.String: + vs := v.String() // TODO NullIfEmptyString + rv = C.my_bind_text(s.stmt, i, C.CString(vs), C.int(len(vs))) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + rv = C.sqlite3_bind_int64(s.stmt, i, C.sqlite3_int64(v.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + ui := v.Uint() + if ui > math.MaxInt64 { + return s.specificError("int overflow") + } + rv = C.sqlite3_bind_int64(s.stmt, i, C.sqlite3_int64(ui)) + case reflect.Bool: + rv = C.sqlite3_bind_int(s.stmt, i, btocint(v.Bool())) + case reflect.Float32, reflect.Float64: + rv = C.sqlite3_bind_double(s.stmt, i, C.double(v.Float())) + default: + name, _ := s.BindParameterName(index) + return s.specificError("unsupported type in Bind: %T (index: %d, name: %q)", value, index, name) + } + return s.error(rv, "Stmt.Bind") +} + +// Next evaluates an SQL statement +// +// With custom error handling: +// for { +// if ok, err := s.Next(); err != nil { +// return nil, err +// } else if !ok { +// break +// } +// err = s.Scan(&fnum, &inum, &sstr) +// } +// +// (See http://sqlite.org/c3ref/step.html) +func (s *Stmt) Next() (bool, error) { + rv := C.sqlite3_step(s.stmt) + err := Errno(rv) + if err == Row { + return true, nil + } + C.sqlite3_reset(s.stmt) // Release implicit lock as soon as possible (see dbEvalStep in tclsqlite3.c) + if err != Done { + return false, s.error(rv, "Stmt.Next") + } + // TODO Check column count > 0 + return false, nil +} + +// Reset terminates the current execution of an SQL statement +// and reset it back to its starting state so that it can be reused. +// (See http://sqlite.org/c3ref/reset.html) +func (s *Stmt) Reset() error { + return s.error(C.sqlite3_reset(s.stmt), "Stmt.Reset") +} + +// ClearBindings resets all bindings on a prepared statement. +// (See http://sqlite.org/c3ref/clear_bindings.html) +func (s *Stmt) ClearBindings() error { + return s.error(C.sqlite3_clear_bindings(s.stmt), "Stmt.ClearBindings") +} + +// ColumnCount returns the number of columns in the result set for the statement (with or without row). +// (See http://sqlite.org/c3ref/column_count.html) +func (s *Stmt) ColumnCount() int { + if s.columnCount == -1 { + s.columnCount = int(C.sqlite3_column_count(s.stmt)) + } + return s.columnCount +} + +// DataCount returns the number of values available from the current row of the currently executing statement. +// Same as ColumnCount() except when there is no (more) row, it returns 0. +// (See http://sqlite.org/c3ref/data_count.html) +func (s *Stmt) DataCount() int { + return int(C.sqlite3_data_count(s.stmt)) +} + +// ColumnName returns the name of the Nth column of the result set returned by the SQL statement. (not cached) +// The leftmost column is number 0. +// (See http://sqlite.org/c3ref/column_name.html) +func (s *Stmt) ColumnName(index int) string { + if index < 0 || index >= s.ColumnCount() { + panic(fmt.Sprintf("column index %d out of range [0,%d[.", index, s.ColumnCount())) + } + // If there is no AS clause then the name of the column is unspecified and may change from one release of SQLite to the next. + return C.GoString(C.sqlite3_column_name(s.stmt, C.int(index))) +} + +// ColumnNames returns the name of the columns of the result set returned by the SQL statement. 
(not cached)
+func (s *Stmt) ColumnNames() []string {
+	count := s.ColumnCount()
+	names := make([]string, count)
+	for i := 0; i < count; i++ {
+		names[i] = s.ColumnName(i)
+	}
+	return names
+}
+
+// Type enumerates SQLite fundamental datatypes
+type Type uint8
+
+func (t Type) String() string {
+	return typeText[t]
+}
+
+// SQLite fundamental datatypes
+const (
+	Integer = Type(C.SQLITE_INTEGER)
+	Float   = Type(C.SQLITE_FLOAT)
+	Blob    = Type(C.SQLITE_BLOB)
+	Null    = Type(C.SQLITE_NULL)
+	Text    = Type(C.SQLITE3_TEXT)
+)
+
+var typeText = map[Type]string{
+	Integer: "Integer",
+	Float:   "Float",
+	Blob:    "Blob",
+	Null:    "Null",
+	Text:    "Text",
+}
+
+// ColumnType returns the datatype code for the initial data type of the result column.
+// The leftmost column is number 0.
+// Should not be cached (valid only for one row) (see dynamic type http://www.sqlite.org/datatype3.html)
+//
+// After a type conversion, the value returned by sqlite3_column_type() is undefined.
+// (See sqlite3_column_type: http://sqlite.org/c3ref/column_blob.html)
+func (s *Stmt) ColumnType(index int) Type {
+	if index < 0 || index >= s.ColumnCount() {
+		panic(fmt.Sprintf("column index %d out of range [0,%d[.", index, s.ColumnCount()))
+	}
+	return Type(C.sqlite3_column_type(s.stmt, C.int(index))) // TODO request all columns type at once
+}
+
+// NamedScan scans result values from a query by name (name1, value1, ...).
+//
+// NULL value is converted to 0 if arg type is *int,*int64,*float,*float64, to "" for *string, to []byte{} for *[]byte and to false for *bool.
+// To avoid NULL conversion, arg type must be **T.
+// Calls sqlite3_column_(blob|double|int|int64|text) depending on args type.
+// (See http://sqlite.org/c3ref/column_blob.html)
+func (s *Stmt) NamedScan(args ...interface{}) error {
+	if len(args)%2 != 0 {
+		return s.specificError("expected an even number of arguments: %d", len(args))
+	}
+	for i := 0; i < len(args); i += 2 {
+		name, ok := args[i].(string)
+		if !ok {
+			return s.specificError("non-string field name at %d: %T", i, args[i])
+		}
+		index, err := s.ColumnIndex(name) // How to look up only once for one statement ?
+		if err != nil {
+			return err
+		}
+		ptr := args[i+1]
+		_, err = s.ScanByIndex(index, ptr)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Scan scans result values from a query.
+//
+// NULL value is converted to 0 if arg type is *int,*int64,*float,*float64, to "" for *string, to []byte{} for *[]byte and to false for *bool.
+// To avoid NULL conversion, arg type must be **T.
+// Calls sqlite3_column_(blob|double|int|int64|text) depending on args type/kind.
+// (See http://sqlite.org/c3ref/column_blob.html)
+func (s *Stmt) Scan(args ...interface{}) error {
+	n := s.ColumnCount()
+	if n != len(args) { // What happens when the number of arguments is less than the number of columns?
+		return s.specificError("incorrect argument count for Stmt.Scan: have %d want %d", len(args), n)
+	}
+
+	for i, v := range args {
+		_, err := s.ScanByIndex(i, v)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// SQL returns the SQL associated with a prepared statement.
+// (See http://sqlite.org/c3ref/sql.html)
+func (s *Stmt) SQL() string {
+	if s.sql == "" {
+		s.sql = C.GoString(C.sqlite3_sql(s.stmt))
+	}
+	return s.sql
+}
+
+// Empty returns true when the input text contains no SQL (if the input is an empty string or a comment)
+func (s *Stmt) Empty() bool {
+	return s.stmt == nil
+}
+
+// Tail returns the unused portion of the original SQL statement.
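To make the **T NULL convention above concrete, a small sketch (the table and column are invented):

```go
package main

import (
	"fmt"

	sqlite "github.com/gwenn/gosqlite"
)

func printNullable(c *sqlite.Conn) error {
	return c.Select("SELECT n FROM test", func(s *sqlite.Stmt) error {
		var i int
		n := &i // inner pointer must be non-nil; Scan sets n to nil on NULL
		if err := s.Scan(&n); err != nil {
			return err
		}
		if n == nil {
			fmt.Println("NULL")
		} else {
			fmt.Println(*n)
		}
		return nil
	})
}
```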
+func (s *Stmt) Tail() string { + return s.tail +} + +// ColumnIndex returns the column index in a result set for a given column name. +// The leftmost column is number 0. +// Must scan all columns (but result is cached). +// (See http://sqlite.org/c3ref/column_name.html) +func (s *Stmt) ColumnIndex(name string) (int, error) { + if s.cols == nil { + count := s.ColumnCount() + s.cols = make(map[string]int, count) + for i := 0; i < count; i++ { + s.cols[s.ColumnName(i)] = i + } + } + index, ok := s.cols[name] + if ok { + return index, nil + } + return -1, s.specificError("invalid column name: %q", name) +} + +// ScanByName scans result value from a query. +// Returns true when column is null. +// Calls sqlite3_column_(blob|double|int|int64|text) depending on arg type/kind. +// (See http://sqlite.org/c3ref/column_blob.html) +func (s *Stmt) ScanByName(name string, value interface{}) (isNull bool, err error) { + index, err := s.ColumnIndex(name) + if err != nil { + return false, err + } + return s.ScanByIndex(index, value) +} + +// ScanByIndex scans result value from a query. +// The leftmost column/index is number 0. +// +// Destination type is specified by the caller (except when value type is *interface{}). +// The value must be of one of the following types/kinds: +// (*)*string +// (*)*int,int8,int16,int32,int64 +// (*)*uint,uint8,uint16,uint32,uint64 +// (*)*bool +// (*)*float32,float64 +// (*)*[]byte +// *time.Time +// sql.Scanner +// *interface{} +// +// Returns true when column is null. +// Calls sqlite3_column_(blob|double|int|int64|text) depending on arg type/kind. +// (See http://sqlite.org/c3ref/column_blob.html) +func (s *Stmt) ScanByIndex(index int, value interface{}) (isNull bool, err error) { + if index < 0 || index >= s.ColumnCount() { + panic(fmt.Sprintf("column index %d out of range [0,%d[.", index, s.ColumnCount())) + } + switch value := value.(type) { + case nil: + case *string: + *value, isNull = s.ScanText(index) + case **string: + var st string + if st, isNull = s.ScanText(index); isNull { + *value = nil + } else { + **value = st + } + case *int: + *value, isNull, err = s.ScanInt(index) + case **int: + var i int + if i, isNull, err = s.ScanInt(index); err == nil { + if isNull { + *value = nil + } else { + **value = i + } + } + case *int32: + *value, isNull, err = s.ScanInt32(index) + case **int32: + var i int32 + if i, isNull, err = s.ScanInt32(index); err == nil { + if isNull { + *value = nil + } else { + **value = i + } + } + case *int64: + *value, isNull, err = s.ScanInt64(index) + case **int64: + var i int64 + if i, isNull, err = s.ScanInt64(index); err == nil { + if isNull { + *value = nil + } else { + **value = i + } + } + case *byte: + *value, isNull, err = s.ScanByte(index) + case **byte: + var b byte + if b, isNull, err = s.ScanByte(index); err == nil { + if isNull { + *value = nil + } else { + **value = b + } + } + case *bool: + *value, isNull, err = s.ScanBool(index) + case **bool: + var b bool + if b, isNull, err = s.ScanBool(index); err == nil { + if isNull { + *value = nil + } else { + **value = b + } + } + case *float64: + *value, isNull, err = s.ScanDouble(index) + case **float64: + var f float64 + if f, isNull, err = s.ScanDouble(index); err == nil { + if isNull { + *value = nil + } else { + **value = f + } + } + case *[]byte: + *value, isNull = s.ScanBlob(index) + case **[]byte: + var bs []byte + if bs, isNull = s.ScanBlob(index); isNull { + *value = nil + } else { + **value = bs + } + case *time.Time: // go fix doesn't like this type! 
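+		// (delegates to ScanTime below, which accepts TEXT layouts as well as
+		// INTEGER unix epochs and REAL julian days, depending on column affinity)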
+ *value, isNull, err = s.ScanTime(index) + case sql.Scanner: + var v interface{} + v, isNull = s.ScanValue(index) + err = value.Scan(v) + case *interface{}: + *value, isNull = s.ScanValue(index) + default: + return s.ScanReflect(index, value) + } + return +} + +// ScanReflect scans result value from a query. +// The leftmost column/index is number 0. +// +// Destination type is specified by the caller. +// The value must be of one of the following kinds: +// *string +// *int,int8,int16,int32,int64 +// *uint,uint8,uint16,uint32,uint64 +// *bool +// *float32,float64 +// +// Returns true when column is null. +func (s *Stmt) ScanReflect(index int, v interface{}) (isNull bool, err error) { + if index < 0 || index >= s.ColumnCount() { + panic(fmt.Sprintf("column index %d out of range [0,%d[.", index, s.ColumnCount())) + } + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return false, s.specificError("ScanReflect unsupported type %T", v) + } + dv := reflect.Indirect(rv) + switch dv.Kind() { + case reflect.String: + var t string + t, isNull = s.ScanText(index) + dv.SetString(t) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + var i int64 + if i, isNull, err = s.ScanInt64(index); err == nil { + dv.SetInt(i) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + var i int64 + if i, isNull, err = s.ScanInt64(index); err == nil { + if i < 0 { + err = s.specificError("negative value: %d", i) + } else { + dv.SetUint(uint64(i)) + } + } + case reflect.Bool: + var b bool + if b, isNull, err = s.ScanBool(index); err == nil { + dv.SetBool(b) + } + case reflect.Float32, reflect.Float64: + var f float64 + if f, isNull, err = s.ScanDouble(index); err == nil { + dv.SetFloat(f) + } + default: + return false, s.specificError("unsupported type in Scan: %T", v) + } + return +} + +// ScanValue scans result value from a query. +// The leftmost column/index is number 0. +// +// Destination type is decided by SQLite. +// The returned value will be of one of the following types: +// nil +// string +// int64 +// float64 +// []byte +// +// Calls sqlite3_column_(blob|double|int|int64|text) depending on columns type. 
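+//
+// For example (an illustrative sketch; error handling elided):
+//
+//	for {
+//		if ok, err := s.Next(); err != nil || !ok {
+//			break
+//		}
+//		v, isNull := s.ScanValue(0)
+//		fmt.Println(v, isNull)
+//	}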
+// (See http://sqlite.org/c3ref/column_blob.html) +func (s *Stmt) ScanValue(index int) (value interface{}, isNull bool) { + if index < 0 || index >= s.ColumnCount() { + panic(fmt.Sprintf("column index %d out of range [0,%d[.", index, s.ColumnCount())) + } + switch s.ColumnType(index) { + case Null: + return nil, true + case Text: // does not work as expected if column type affinity is TEXT but inserted value was a numeric + if s.c.ScanNumericalAsTime && s.c.DefaultTimeLayout != "" && s.ColumnTypeAffinity(index) == Numerical { + p := C.sqlite3_column_text(s.stmt, C.int(index)) + txt := C.GoString((*C.char)(unsafe.Pointer(p))) + value, err := time.Parse(s.c.DefaultTimeLayout, txt) + if err == nil { + return value, false + } + Log(-1, err.Error()) + } + p := C.sqlite3_column_text(s.stmt, C.int(index)) + return C.GoString((*C.char)(unsafe.Pointer(p))), false + case Integer: + value := int64(C.sqlite3_column_int64(s.stmt, C.int(index))) + if s.c.ScanNumericalAsTime && s.c.DefaultTimeLayout == "" && s.ColumnTypeAffinity(index) == Numerical { + return time.Unix(value, 0), false + } + return value, false + case Float: // does not work as expected if column type affinity is REAL but inserted value was an integer + return float64(C.sqlite3_column_double(s.stmt, C.int(index))), false + case Blob: + // The return value from sqlite3_column_blob() for a zero-length BLOB is a NULL pointer. + p := C.sqlite3_column_blob(s.stmt, C.int(index)) + n := C.sqlite3_column_bytes(s.stmt, C.int(index)) + // value = (*[1 << 30]byte)(unsafe.Pointer(p))[:n] + return C.GoBytes(p, n), false // The memory space used to hold strings and BLOBs is freed automatically. + } + panic("The column type is not one of SQLITE_INTEGER, SQLITE_FLOAT, SQLITE_TEXT, SQLITE_BLOB, or SQLITE_NULL") +} + +// ScanValues is like ScanValue on several columns. +func (s *Stmt) ScanValues(values []interface{}) { + for i := range values { + values[i], _ = s.ScanValue(i) + } +} + +// ScanText scans result value from a query. +// The leftmost column/index is number 0. +// Returns true when column is null. +// (See sqlite3_column_text: http://sqlite.org/c3ref/column_blob.html) +func (s *Stmt) ScanText(index int) (value string, isNull bool) { + if index < 0 || index >= s.ColumnCount() { + panic(fmt.Sprintf("column index %d out of range [0,%d[.", index, s.ColumnCount())) + } + p := C.sqlite3_column_text(s.stmt, C.int(index)) + if p == nil { + isNull = true + } else { + value = C.GoString((*C.char)(unsafe.Pointer(p))) + } + return +} + +// ScanInt scans result value from a query. +// The leftmost column/index is number 0. +// Returns true when column is null. +// (See sqlite3_column_int: http://sqlite.org/c3ref/column_blob.html) +// TODO Factorize with ScanByte, ScanBool +func (s *Stmt) ScanInt(index int) (value int, isNull bool, err error) { + ctype := s.ColumnType(index) + if ctype == Null { + isNull = true + } else { + if CheckTypeMismatch { + err = s.checkTypeMismatch(ctype, Integer) + } + if i64 { + value = int(C.sqlite3_column_int64(s.stmt, C.int(index))) + } else { + value = int(C.sqlite3_column_int(s.stmt, C.int(index))) + } + } + return +} + +// ScanInt32 scans result value from a query. +// The leftmost column/index is number 0. +// Returns true when column is null. 
+// (See sqlite3_column_int: http://sqlite.org/c3ref/column_blob.html) +// TODO Factorize with ScanByte, ScanBool +func (s *Stmt) ScanInt32(index int) (value int32, isNull bool, err error) { + ctype := s.ColumnType(index) + if ctype == Null { + isNull = true + } else { + if CheckTypeMismatch { + err = s.checkTypeMismatch(ctype, Integer) + } + value = int32(C.sqlite3_column_int(s.stmt, C.int(index))) + } + return +} + +// ScanInt64 scans result value from a query. +// The leftmost column/index is number 0. +// Returns true when column is null. +// (See sqlite3_column_int64: http://sqlite.org/c3ref/column_blob.html) +func (s *Stmt) ScanInt64(index int) (value int64, isNull bool, err error) { + ctype := s.ColumnType(index) + if ctype == Null { + isNull = true + } else { + if CheckTypeMismatch { + err = s.checkTypeMismatch(ctype, Integer) + } + value = int64(C.sqlite3_column_int64(s.stmt, C.int(index))) + } + return +} + +// ScanByte scans result value from a query. +// The leftmost column/index is number 0. +// Returns true when column is null. +// (See sqlite3_column_int: http://sqlite.org/c3ref/column_blob.html) +func (s *Stmt) ScanByte(index int) (value byte, isNull bool, err error) { + ctype := s.ColumnType(index) + if ctype == Null { + isNull = true + } else { + if CheckTypeMismatch { + err = s.checkTypeMismatch(ctype, Integer) + } + value = byte(C.sqlite3_column_int(s.stmt, C.int(index))) + } + return +} + +// ScanBool scans result value from a query. +// The leftmost column/index is number 0. +// Returns true when column is null. +// (See sqlite3_column_int: http://sqlite.org/c3ref/column_blob.html) +func (s *Stmt) ScanBool(index int) (value bool, isNull bool, err error) { + ctype := s.ColumnType(index) + if ctype == Null { + isNull = true + } else { + if CheckTypeMismatch { + err = s.checkTypeMismatch(ctype, Integer) + } + value = C.sqlite3_column_int(s.stmt, C.int(index)) != 0 + } + return +} + +// ScanDouble scans result value from a query. +// The leftmost column/index is number 0. +// Returns true when column is null. +// (See sqlite3_column_double: http://sqlite.org/c3ref/column_blob.html) +func (s *Stmt) ScanDouble(index int) (value float64, isNull bool, err error) { + ctype := s.ColumnType(index) + if ctype == Null { + isNull = true + } else { + if CheckTypeMismatch { + err = s.checkTypeMismatch(ctype, Float) + } + value = float64(C.sqlite3_column_double(s.stmt, C.int(index))) + } + return +} + +// ScanBlob scans result value from a query. +// The leftmost column/index is number 0. +// Returns true when column is null. +// (See sqlite3_column_blob: http://sqlite.org/c3ref/column_blob.html) +func (s *Stmt) ScanBlob(index int) (value []byte, isNull bool) { + if index < 0 || index >= s.ColumnCount() { + panic(fmt.Sprintf("column index %d out of range [0,%d[.", index, s.ColumnCount())) + } + p := C.sqlite3_column_blob(s.stmt, C.int(index)) + if p == nil { + isNull = true + } else { + n := C.sqlite3_column_bytes(s.stmt, C.int(index)) + // value = (*[1 << 30]byte)(unsafe.Pointer(p))[:n] + value = C.GoBytes(p, n) // The memory space used to hold strings and BLOBs is freed automatically. + } + return +} + +// ScanRawBytes scans result value from a query without making any copy. +// The leftmost column/index is number 0. +// Returns true when column is null. 
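+// The returned slice aliases memory owned by SQLite: treat it as valid only
+// until the statement is advanced, reset or finalized, and copy it if the
+// data must outlive the current row.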
+// (See sqlite3_column_blob: http://sqlite.org/c3ref/column_blob.html) +func (s *Stmt) ScanRawBytes(index int) (value []byte, isNull bool) { + if index < 0 || index >= s.ColumnCount() { + panic(fmt.Sprintf("column index %d out of range [0,%d[.", index, s.ColumnCount())) + } + p := C.sqlite3_column_blob(s.stmt, C.int(index)) + if p == nil { + isNull = true + } else { + n := C.sqlite3_column_bytes(s.stmt, C.int(index)) + value = (*[1 << 30]byte)(unsafe.Pointer(p))[:n:n] + } + return +} + +// ScanTime scans result value from a query. +// If time is persisted as string without timezone, UTC is used. +// If time is persisted as numeric, local is used. +// The leftmost column/index is number 0. +// Returns true when column is null. +// The column type affinity must be consistent with the format used (INTEGER or NUMERIC or NONE for unix time, REAL or NONE for julian day). +func (s *Stmt) ScanTime(index int) (value time.Time, isNull bool, err error) { + ctype := s.ColumnType(index) + switch ctype { + case Null: + isNull = true + case Text: // does not work as expected if column type affinity is TEXT but inserted value was a numeric + p := C.sqlite3_column_text(s.stmt, C.int(index)) + txt := C.GoString((*C.char)(unsafe.Pointer(p))) + var layout string + switch len(txt) { + case 5: // HH:MM + layout = "15:04" + case 8: // HH:MM:SS + layout = "15:04:05" + case 10: // YYYY-MM-DD + layout = "2006-01-02" + case 12: // HH:MM:SS.SSS + layout = "15:04:05.000" + case 16: // YYYY-MM-DDTHH:MM + if txt[10] == 'T' { + layout = "2006-01-02T15:04" + } else { + layout = "2006-01-02 15:04" + } + case 19: // YYYY-MM-DDTHH:MM:SS + if txt[10] == 'T' { + layout = "2006-01-02T15:04:05" + } else { + layout = "2006-01-02 15:04:05" + } + case 23: // YYYY-MM-DDTHH:MM:SS.SSS + if txt[10] == 'T' { + layout = "2006-01-02T15:04:05.000" + } else { + layout = "2006-01-02 15:04:05.000" + } + default: // YYYY-MM-DDTHH:MM:SS.SSSZhh:mm or parse error + if len(txt) > 10 && txt[10] == 'T' { + layout = "2006-01-02T15:04:05.000Z07:00" + } else { + layout = "2006-01-02 15:04:05.000Z07:00" + } + } + value, err = time.Parse(layout, txt) // UTC except when timezone is specified + case Integer: + unixepoch := int64(C.sqlite3_column_int64(s.stmt, C.int(index))) + value = time.Unix(unixepoch, 0) // local time + case Float: // does not work as expected if column affinity is REAL but inserted value was an integer + jd := float64(C.sqlite3_column_double(s.stmt, C.int(index))) + value = JulianDayToLocalTime(jd) // local time + default: + err = s.specificError("unexpected column type affinity for time persistence: %q", ctype) + } + return +} + +// Only lossy conversion is reported as error. +func (s *Stmt) checkTypeMismatch(source, target Type) error { + switch target { + case Integer: + switch source { + case Float: // does not work if column type affinity is REAL but inserted value was an integer + fallthrough + case Text: // does not work if column type affinity is TEXT but inserted value was an integer + fallthrough + case Blob: + return s.specificError("type mismatch, source %q vs target %q", source, target) + } + case Float: + switch source { + case Text: // does not work if column type affinity is TEXT but inserted value was a real + fallthrough + case Blob: + return s.specificError("type mismatch, source %q vs target %q", source, target) + } + } + return nil +} + +// Busy returns true if the prepared statement is in need of being reset. 
+// (See http://sqlite.org/c3ref/stmt_busy.html) +func (s *Stmt) Busy() bool { + return C.sqlite3_stmt_busy(s.stmt) != 0 +} + +// Finalize destroys a prepared statement. +// (See http://sqlite.org/c3ref/finalize.html) +func (s *Stmt) Finalize() error { + if s == nil { + return errors.New("nil sqlite statement") + } + if s.Cacheable && s.c != nil && s.c.db != nil { + return s.c.stmtCache.release(s) + } + return s.finalize() +} +func (s *Stmt) finalize() error { + if s == nil { + return errors.New("nil sqlite statement") + } + if s.stmt == nil { + return nil + } + if s.c == nil || s.c.db == nil { + Log(C.SQLITE_MISUSE, "sqlite statement with already closed database connection") + return errors.New("sqlite statement with already closed database connection") + } + rv := C.sqlite3_finalize(s.stmt) // must be called only once + s.stmt = nil + if rv != C.SQLITE_OK { + Log(int32(rv), "error while finalizing Stmt") + return s.error(rv, "Stmt.finalize") + } + return nil +} + +// Conn finds the database handle of a prepared statement. +// (Like http://sqlite.org/c3ref/db_handle.html) +func (s *Stmt) Conn() *Conn { + return s.c +} + +// ReadOnly returns true if the prepared statement is guaranteed to not modify the database. +// (See http://sqlite.org/c3ref/stmt_readonly.html) +func (s *Stmt) ReadOnly() bool { + return C.sqlite3_stmt_readonly(s.stmt) != 0 +} diff --git a/vendor/github.com/gwenn/gosqlite/test.csv b/vendor/github.com/gwenn/gosqlite/test.csv new file mode 100644 index 0000000..708f93f --- /dev/null +++ b/vendor/github.com/gwenn/gosqlite/test.csv @@ -0,0 +1,6 @@ +"colA","colB","colC" +1,2,3 +a,b,c +a,"b",c +"a","b","c .. z" +"a","b","c,d" diff --git a/vendor/github.com/gwenn/gosqlite/trace.c b/vendor/github.com/gwenn/gosqlite/trace.c new file mode 100644 index 0000000..92d686e --- /dev/null +++ b/vendor/github.com/gwenn/gosqlite/trace.c @@ -0,0 +1,47 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include +#include +//#include "_cgo_export.h" + +extern void goXTrace(void *udp, const char *sql); + +void goSqlite3Trace(sqlite3 *db, void *udp) { + sqlite3_trace(db, goXTrace, udp); +} + +extern void goXProfile(void *udp, const char *sql, sqlite3_uint64 nanoseconds); + +void goSqlite3Profile(sqlite3 *db, void *udp) { + sqlite3_profile(db, goXProfile, udp); +} + +extern int goXAuth(void *udp, int action, const char *arg1, const char *arg2, const char *dbName, const char *triggerName); + +int goSqlite3SetAuthorizer(sqlite3 *db, void *udp) { + return sqlite3_set_authorizer(db, goXAuth, udp); +} + +extern int goXBusy(void *udp, int count); + +int goSqlite3BusyHandler(sqlite3 *db, void *udp) { + return sqlite3_busy_handler(db, goXBusy, udp); +} + +extern int goXProgress(void *udp); + +void goSqlite3ProgressHandler(sqlite3 *db, int numOps, void *udp) { + sqlite3_progress_handler(db, numOps, goXProgress, udp); +} + +extern void goXLog(void *udp, int err, const char *msg); + +int goSqlite3ConfigLog(void *udp) { + if (udp) { + return sqlite3_config(SQLITE_CONFIG_LOG, goXLog, udp); + } else { + return sqlite3_config(SQLITE_CONFIG_LOG, 0, 0); + } +} diff --git a/vendor/github.com/gwenn/gosqlite/trace.go b/vendor/github.com/gwenn/gosqlite/trace.go new file mode 100644 index 0000000..366c924 --- /dev/null +++ b/vendor/github.com/gwenn/gosqlite/trace.go @@ -0,0 +1,447 @@ +// Copyright 2010 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sqlite + +/* +#include +#include + +void goSqlite3Trace(sqlite3 *db, void *udp); +void goSqlite3Profile(sqlite3 *db, void *udp); +int goSqlite3SetAuthorizer(sqlite3 *db, void *udp); +int goSqlite3BusyHandler(sqlite3 *db, void *udp); +void goSqlite3ProgressHandler(sqlite3 *db, int numOps, void *udp); + +// cgo doesn't support varargs +static inline void my_log(int iErrCode, char *msg) { + sqlite3_log(iErrCode, msg); +} + +int goSqlite3ConfigLog(void *udp); +*/ +import "C" + +import ( + "fmt" + "io" + "time" + "unsafe" +) + +// Tracer is the signature of a trace function. +// See Conn.Trace +type Tracer func(udp interface{}, sql string) + +type sqliteTrace struct { + f Tracer + udp interface{} +} + +//export goXTrace +func goXTrace(udp unsafe.Pointer, sql *C.char) { + arg := (*sqliteTrace)(udp) + arg.f(arg.udp, C.GoString(sql)) +} + +// Trace registers or clears a trace function. +// Prepared statement placeholders are replaced/logged with their assigned values. +// There can only be a single tracer defined for each database connection. +// Setting a new tracer clears the old one. +// If f is nil, the current tracer is removed. +// Cannot be used with Go >= 1.6 and cgocheck enabled. +// (See sqlite3_trace, http://sqlite.org/c3ref/profile.html) +func (c *Conn) Trace(f Tracer, udp interface{}) { + if f == nil { + c.trace = nil + C.sqlite3_trace(c.db, nil, nil) + return + } + // To make sure it is not gced, keep a reference in the connection. + c.trace = &sqliteTrace{f, udp} + C.goSqlite3Trace(c.db, unsafe.Pointer(c.trace)) +} + +// Profiler is the signature of a profile function. +// See Conn.Profile +type Profiler func(udp interface{}, sql string, duration time.Duration) + +type sqliteProfile struct { + f Profiler + udp interface{} +} + +//export goXProfile +func goXProfile(udp unsafe.Pointer, sql *C.char, nanoseconds C.sqlite3_uint64) { + arg := (*sqliteProfile)(udp) + arg.f(arg.udp, C.GoString(sql), time.Duration(int64(nanoseconds))) +} + +// Profile registers or clears a profile function. +// Prepared statement placeholders are not logged with their assigned values. +// There can only be a single profiler defined for each database connection. +// Setting a new profiler clears the old one. +// If f is nil, the current profiler is removed. +// Cannot be used with Go >= 1.6 and cgocheck enabled. +// (See sqlite3_profile, http://sqlite.org/c3ref/profile.html) +func (c *Conn) Profile(f Profiler, udp interface{}) { + if f == nil { + c.profile = nil + C.sqlite3_profile(c.db, nil, nil) + return + } + // To make sure it is not gced, keep a reference in the connection. 
+ c.profile = &sqliteProfile{f, udp} + C.goSqlite3Profile(c.db, unsafe.Pointer(c.profile)) +} + +// Auth enumerates Authorizer return codes +type Auth int32 + +// Authorizer return codes +const ( + AuthOk Auth = C.SQLITE_OK + AuthDeny Auth = C.SQLITE_DENY + AuthIgnore Auth = C.SQLITE_IGNORE +) + +// Action enumerates Authorizer action codes +type Action int32 + +// Authorizer action codes +const ( + CreateIndex Action = C.SQLITE_CREATE_INDEX + CreateTable Action = C.SQLITE_CREATE_TABLE + CreateTempIndex Action = C.SQLITE_CREATE_TEMP_INDEX + CreateTempTable Action = C.SQLITE_CREATE_TEMP_TABLE + CreateTempTrigger Action = C.SQLITE_CREATE_TEMP_TRIGGER + CreateTempView Action = C.SQLITE_CREATE_TEMP_VIEW + CreateTrigger Action = C.SQLITE_CREATE_TRIGGER + CreateView Action = C.SQLITE_CREATE_VIEW + Delete Action = C.SQLITE_DELETE + DropIndex Action = C.SQLITE_DROP_INDEX + DropTable Action = C.SQLITE_DROP_TABLE + DropTempIndex Action = C.SQLITE_DROP_TEMP_INDEX + DropTempTable Action = C.SQLITE_DROP_TEMP_TABLE + DropTempTrigger Action = C.SQLITE_DROP_TEMP_TRIGGER + DropTempView Action = C.SQLITE_DROP_TEMP_VIEW + DropTrigger Action = C.SQLITE_DROP_TRIGGER + DropView Action = C.SQLITE_DROP_VIEW + Insert Action = C.SQLITE_INSERT + Pragma Action = C.SQLITE_PRAGMA + Read Action = C.SQLITE_READ + Select Action = C.SQLITE_SELECT + Transaction Action = C.SQLITE_TRANSACTION + Update Action = C.SQLITE_UPDATE + Attach Action = C.SQLITE_ATTACH + Detach Action = C.SQLITE_DETACH + AlterTable Action = C.SQLITE_ALTER_TABLE + Reindex Action = C.SQLITE_REINDEX + Analyze Action = C.SQLITE_ANALYZE + CreateVTable Action = C.SQLITE_CREATE_VTABLE + DropVTable Action = C.SQLITE_DROP_VTABLE + Function Action = C.SQLITE_FUNCTION + Savepoint Action = C.SQLITE_SAVEPOINT + Copy Action = C.SQLITE_COPY +) + +func (a Action) String() string { + switch a { + case CreateIndex: + return "CreateIndex" + case CreateTable: + return "CreateTable" + case CreateTempIndex: + return "CreateTempIndex" + case CreateTempTable: + return "CreateTempTable" + case CreateTempTrigger: + return "CreateTempTrigger" + case CreateTempView: + return "CreateTempView" + case CreateTrigger: + return "CreateTrigger" + case CreateView: + return "CreateView" + case Delete: + return "Delete" + case DropIndex: + return "DropIndex" + case DropTable: + return "DropTable" + case DropTempIndex: + return "DropTempIndex" + case DropTempTable: + return "DropTempTable" + case DropTempTrigger: + return "DropTempTrigger" + case DropTempView: + return "DropTempView" + case DropTrigger: + return "DropTrigger" + case DropView: + return "DropView" + case Insert: + return "Insert" + case Pragma: + return "Pragma" + case Read: + return "Read" + case Select: + return "Select" + case Transaction: + return "Transaction" + case Update: + return "Update" + case Attach: + return "Attach" + case Detach: + return "Detach" + case AlterTable: + return "AlterTable" + case Reindex: + return "Reindex" + case Analyze: + return "Analyze" + case CreateVTable: + return "CreateVTable" + case DropVTable: + return "DropVTable" + case Function: + return "Function" + case Savepoint: + return "Savepoint" + case Copy: + return "Copy" + } + return fmt.Sprintf("Unknown Action: %d", a) +} + +// Authorizer is the signature of an access authorization function. 
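+//
+// For example, a sketch that makes destructive statements fail (illustrative
+// only; any subset of Action values could be checked):
+//
+//	c.SetAuthorizer(func(udp interface{}, action Action, arg1, arg2, dbName, triggerName string) Auth {
+//		if action == Delete || action == DropTable {
+//			return AuthDeny
+//		}
+//		return AuthOk
+//	}, nil)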
+// See Conn.SetAuthorizer
+type Authorizer func(udp interface{}, action Action, arg1, arg2, dbName, triggerName string) Auth
+
+type sqliteAuthorizer struct {
+	f   Authorizer
+	udp interface{}
+}
+
+//export goXAuth
+func goXAuth(udp unsafe.Pointer, action int, arg1, arg2, dbName, triggerName *C.char) C.int {
+	arg := (*sqliteAuthorizer)(udp)
+	result := arg.f(arg.udp, Action(action), C.GoString(arg1), C.GoString(arg2), C.GoString(dbName), C.GoString(triggerName))
+	return C.int(result)
+}
+
+// SetAuthorizer sets or clears the access authorization function.
+// Cannot be used with Go >= 1.6 and cgocheck enabled.
+// (See http://sqlite.org/c3ref/set_authorizer.html)
+func (c *Conn) SetAuthorizer(f Authorizer, udp interface{}) error {
+	if f == nil {
+		c.authorizer = nil
+		return c.error(C.sqlite3_set_authorizer(c.db, nil, nil), "Conn.SetAuthorizer")
+	}
+	// To make sure it is not gced, keep a reference in the connection.
+	c.authorizer = &sqliteAuthorizer{f, udp}
+	return c.error(C.goSqlite3SetAuthorizer(c.db, unsafe.Pointer(c.authorizer)), "Conn.SetAuthorizer")
+}
+
+// BusyHandler is the signature of a callback to handle SQLITE_BUSY errors.
+// Returns true to try again. Returns false to abort.
+// See Conn.BusyHandler
+type BusyHandler func(udp interface{}, count int) bool
+
+type sqliteBusyHandler struct {
+	f   BusyHandler
+	udp interface{}
+}
+
+//export goXBusy
+func goXBusy(udp unsafe.Pointer, count int) C.int {
+	arg := (*sqliteBusyHandler)(udp)
+	result := arg.f(arg.udp, count)
+	return btocint(result)
+}
+
+// BusyHandler registers a callback to handle SQLITE_BUSY errors.
+// There can only be a single busy handler defined for each database connection.
+// Setting a new busy handler clears any previously set handler.
+// If f is nil, the current handler is removed.
+// Cannot be used with Go >= 1.6 and cgocheck enabled.
+// (See http://sqlite.org/c3ref/busy_handler.html)
+func (c *Conn) BusyHandler(f BusyHandler, udp interface{}) error {
+	if f == nil {
+		c.busyHandler = nil
+		return c.error(C.sqlite3_busy_handler(c.db, nil, nil), "Conn.BusyHandler")
+	}
+	// To make sure it is not gced, keep a reference in the connection.
+	c.busyHandler = &sqliteBusyHandler{f, udp}
+	return c.error(C.goSqlite3BusyHandler(c.db, unsafe.Pointer(c.busyHandler)), "Conn.BusyHandler")
+}
+
+// ProgressHandler is the signature of a progress callback.
+// Returns true to interrupt the long-running query.
+// See Conn.ProgressHandler
+type ProgressHandler func(udp interface{}) (interrupt bool)
+
+type sqliteProgressHandler struct {
+	f   ProgressHandler
+	udp interface{}
+}
+
+//export goXProgress
+func goXProgress(udp unsafe.Pointer) C.int {
+	arg := (*sqliteProgressHandler)(udp)
+	result := arg.f(arg.udp)
+	return btocint(result)
+}
+
+// ProgressHandler registers or clears a progress callback invoked periodically
+// during long-running queries.
+// Only a single progress handler may be defined at one time per database connection.
+// Setting a new progress handler cancels the old one.
+// If f is nil, the current handler is removed.
+// Cannot be used with Go >= 1.6 and cgocheck enabled.
+// (See http://sqlite.org/c3ref/progress_handler.html)
+func (c *Conn) ProgressHandler(f ProgressHandler, numOps int32, udp interface{}) {
+	if f == nil {
+		c.progressHandler = nil
+		C.sqlite3_progress_handler(c.db, 0, nil, nil)
+		return
+	}
+	// To make sure it is not gced, keep a reference in the connection.
+	c.progressHandler = &sqliteProgressHandler{f, udp}
+	C.goSqlite3ProgressHandler(c.db, C.int(numOps), unsafe.Pointer(c.progressHandler))
+}
+
+// StmtStatus enumerates status parameters for prepared statements
+type StmtStatus int32
+
+// Status counters for prepared statements
+const (
+	StmtStatusFullScanStep StmtStatus = C.SQLITE_STMTSTATUS_FULLSCAN_STEP
+	StmtStatusSort         StmtStatus = C.SQLITE_STMTSTATUS_SORT
+	StmtStatusAutoIndex    StmtStatus = C.SQLITE_STMTSTATUS_AUTOINDEX
+
+// StmtStatusVmStep StmtStatus = C.SQLITE_STMTSTATUS_VM_STEP
+)
+
+// Status returns the value of a status counter for a prepared statement.
+// (See http://sqlite.org/c3ref/stmt_status.html)
+func (s *Stmt) Status(op StmtStatus, reset bool) int {
+	return int(C.sqlite3_stmt_status(s.stmt, C.int(op), btocint(reset)))
+}
+
+// MemoryUsed returns the number of bytes of memory currently outstanding (malloced but not freed).
+// (See sqlite3_memory_used: http://sqlite.org/c3ref/memory_highwater.html) +func MemoryUsed() int64 { + return int64(C.sqlite3_memory_used()) +} + +// MemoryHighwater returns the maximum value of MemoryUsed() since the high-water mark was last reset. +// (See sqlite3_memory_highwater: http://sqlite.org/c3ref/memory_highwater.html) +func MemoryHighwater(reset bool) int64 { + return int64(C.sqlite3_memory_highwater(btocint(reset))) +} + +// SoftHeapLimit returns the limit on heap size. +// (See http://sqlite.org/c3ref/soft_heap_limit64.html) +func SoftHeapLimit() int64 { + return SetSoftHeapLimit(-1) +} + +// SetSoftHeapLimit imposes a limit on heap size. +// (See http://sqlite.org/c3ref/soft_heap_limit64.html) +func SetSoftHeapLimit(n int64) int64 { + return int64(C.sqlite3_soft_heap_limit64(C.sqlite3_int64(n))) +} + +// Complete determines if an SQL statement is complete. +// (See http://sqlite.org/c3ref/complete.html) +func Complete(sql string) (bool, error) { + cs := C.CString(sql) + rv := C.sqlite3_complete(cs) + C.free(unsafe.Pointer(cs)) + if rv == C.SQLITE_NOMEM { + return false, ErrNoMem + } + return rv != 0, nil +} + +// Log writes a message into the error log established by ConfigLog method. +// (See http://sqlite.org/c3ref/log.html and http://www.sqlite.org/errlog.html) +// +// Applications can use the sqlite3_log(E,F,..) API to send new messages to the log, if desired, but this is discouraged. +func Log(err /*Errno*/ int32, msg string) { + cs := C.CString(msg) + C.my_log(C.int(err), cs) + C.free(unsafe.Pointer(cs)) +} + +// Logger is the signature of SQLite logger implementation. +// See ConfigLog +type Logger func(udp interface{}, err error, msg string) + +type sqliteLogger struct { + f Logger + udp interface{} +} + +//export goXLog +func goXLog(udp unsafe.Pointer, err int, msg *C.char) { + arg := (*sqliteLogger)(udp) + arg.f(arg.udp, Errno(err), C.GoString(msg)) + return +} + +var logger *sqliteLogger + +// ConfigLog configures the logger of the SQLite library. +// Only one logger can be registered at a time for the whole program. +// The logger must be threadsafe. +// Cannot be used with Go >= 1.6 and cgocheck enabled when udp is not nil. +// (See sqlite3_config(SQLITE_CONFIG_LOG,...): http://sqlite.org/c3ref/config.html and http://www.sqlite.org/errlog.html) +func ConfigLog(f Logger, udp interface{}) error { + var rv C.int + if f == nil { + logger = nil + rv = C.goSqlite3ConfigLog(nil) + } else { + // To make sure it is not gced, keep a reference. 
+ logger = &sqliteLogger{f, udp} + rv = C.goSqlite3ConfigLog(unsafe.Pointer(logger)) + } + if rv == C.SQLITE_OK { + return nil + } + return Errno(rv) +} + +// ExplainQueryPlan outputs the corresponding EXPLAIN QUERY PLAN report to the specified writer +// (See http://sqlite.org/eqp.html) +func (s *Stmt) ExplainQueryPlan(w io.Writer) error { + sql := s.SQL() + if len(sql) == 0 { + return s.specificError("empty statement") + } + explain := "EXPLAIN QUERY PLAN " + s.SQL() + + sExplain, err := s.Conn().prepare(explain) + if err != nil { + return err + } + defer sExplain.finalize() + + var selectid, order, from int + var detail string + err = sExplain.execQuery(func(s *Stmt) error { + if err := s.Scan(&selectid, &order, &from, &detail); err != nil { + return err + } + fmt.Fprintf(w, "%d\t%d\t%d\t%s\n", selectid, order, from, detail) + return nil + }) + return err +} diff --git a/vendor/github.com/gwenn/gosqlite/util.go b/vendor/github.com/gwenn/gosqlite/util.go new file mode 100644 index 0000000..b17a9bf --- /dev/null +++ b/vendor/github.com/gwenn/gosqlite/util.go @@ -0,0 +1,72 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sqlite + +/* +#include +#include + +// cgo doesn't support varargs +static inline char *my_mprintf(char *zFormat, char *arg) { + return sqlite3_mprintf(zFormat, arg); +} +*/ +import "C" + +import ( + "fmt" + "reflect" + "strings" + "unsafe" +) + +// Mprintf is like fmt.Printf but implements some additional formatting options +// that are useful for constructing SQL statements. +// (See http://sqlite.org/c3ref/mprintf.html) +func Mprintf(format string, arg string) string { + zSQL := mPrintf(format, arg) + defer C.sqlite3_free(unsafe.Pointer(zSQL)) + return C.GoString(zSQL) +} +func mPrintf(format, arg string) *C.char { // TODO may return nil when no memory... + cf := C.CString(format) + defer C.free(unsafe.Pointer(cf)) + ca := C.CString(arg) + defer C.free(unsafe.Pointer(ca)) + return C.my_mprintf(cf, ca) +} + +func btocint(b bool) C.int { + if b { + return 1 + } + return 0 +} +func cstring(s string) (*C.char, C.int) { + cs := (*reflect.StringHeader)(unsafe.Pointer(&s)) + return (*C.char)(unsafe.Pointer(cs.Data)), C.int(cs.Len) +} + +/* +func gostring(cs *C.char) string { + var x reflect.StringHeader + x.Data = uintptr(unsafe.Pointer(cs)) + x.Len = int(C.strlen(cs)) + return *(*string)(unsafe.Pointer(&x)) +} +*/ + +func escapeQuote(identifier string) string { + if strings.ContainsRune(identifier, '"') { // escape quote by doubling them + identifier = strings.Replace(identifier, `"`, `""`, -1) + } + return identifier +} +func doubleQuote(dbName string) string { + if dbName == "main" || dbName == "temp" { + return dbName + } + return fmt.Sprintf(`"%s"`, escapeQuote(dbName)) // surround identifier with quote +} diff --git a/vendor/github.com/gwenn/gosqlite/vtab.c b/vendor/github.com/gwenn/gosqlite/vtab.c new file mode 100644 index 0000000..8616772 --- /dev/null +++ b/vendor/github.com/gwenn/gosqlite/vtab.c @@ -0,0 +1,164 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +#include +#include +#include "_cgo_export.h" + +typedef struct goVTab goVTab; + +struct goVTab { + sqlite3_vtab base; + void *vTab; +}; + +static int cXInit(sqlite3 *db, void *pAux, int argc, const char *const*argv, sqlite3_vtab **ppVTab, char **pzErr, int isCreate) { + void *vTab = goMInit(db, pAux, argc, (char**)argv, pzErr, isCreate); + if (!vTab || *pzErr) { + return SQLITE_ERROR; + } + goVTab *pvTab = (goVTab *)sqlite3_malloc(sizeof(goVTab)); + if (!pvTab) { + *pzErr = sqlite3_mprintf("%s", "Out of memory"); + return SQLITE_NOMEM; + } + memset(pvTab, 0, sizeof(goVTab)); + pvTab->vTab = vTab; + + *ppVTab = (sqlite3_vtab *)pvTab; + *pzErr = 0; + return SQLITE_OK; +} + +static inline int cXCreate(sqlite3 *db, void *pAux, int argc, const char *const*argv, sqlite3_vtab **ppVTab, char **pzErr) { + return cXInit(db, pAux, argc, argv, ppVTab, pzErr, 1); +} +static inline int cXConnect(sqlite3 *db, void *pAux, int argc, const char *const*argv, sqlite3_vtab **ppVTab, char **pzErr) { + return cXInit(db, pAux, argc, argv, ppVTab, pzErr, 0); +} + +static inline int cXBestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *info) { + // TODO + return SQLITE_OK; +} + +static int cXRelease(sqlite3_vtab *pVTab, int isDestroy) { + char *pzErr = goVRelease(((goVTab*)pVTab)->vTab, isDestroy); + if (pzErr) { + if (pVTab->zErrMsg) + sqlite3_free(pVTab->zErrMsg); + pVTab->zErrMsg = pzErr; + return SQLITE_ERROR; + } + if (pVTab->zErrMsg) + sqlite3_free(pVTab->zErrMsg); + sqlite3_free(pVTab); + return SQLITE_OK; +} + +static inline int cXDisconnect(sqlite3_vtab *pVTab) { + return cXRelease(pVTab, 0); +} +static inline int cXDestroy(sqlite3_vtab *pVTab) { + return cXRelease(pVTab, 1); +} + +typedef struct goVTabCursor goVTabCursor; + +struct goVTabCursor { + sqlite3_vtab_cursor base; + void *vTabCursor; +}; + +static int cXOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor) { + void *vTabCursor = goVOpen(((goVTab*)pVTab)->vTab, &(pVTab->zErrMsg)); + goVTabCursor *pCursor = (goVTabCursor *)sqlite3_malloc(sizeof(goVTabCursor)); + if (!pCursor) { + return SQLITE_NOMEM; + } + memset(pCursor, 0, sizeof(goVTabCursor)); + pCursor->vTabCursor = vTabCursor; + *ppCursor = (sqlite3_vtab_cursor *)pCursor; + return SQLITE_OK; +} + +static int setErrMsg(sqlite3_vtab_cursor *pCursor, char *pzErr) { + if (pCursor->pVtab->zErrMsg) + sqlite3_free(pCursor->pVtab->zErrMsg); + pCursor->pVtab->zErrMsg = pzErr; + return SQLITE_ERROR; +} + +static int cXClose(sqlite3_vtab_cursor *pCursor) { + char *pzErr = goVClose(((goVTabCursor*)pCursor)->vTabCursor); + if (pzErr) { + return setErrMsg(pCursor, pzErr); + } + sqlite3_free(pCursor); + return SQLITE_OK; +} +static int cXFilter(sqlite3_vtab_cursor *pCursor, int idxNum, const char *idxStr, int argc, sqlite3_value **argv) { + // TODO idxNum, idxStr, argc, argv are useless when cXBestIndex is empty + char *pzErr = goVFilter(((goVTabCursor*)pCursor)->vTabCursor); + if (pzErr) { + return setErrMsg(pCursor, pzErr); + } + return SQLITE_OK; +} +static int cXNext(sqlite3_vtab_cursor *pCursor) { + char *pzErr = goVNext(((goVTabCursor*)pCursor)->vTabCursor); + if (pzErr) { + return setErrMsg(pCursor, pzErr); + } + return SQLITE_OK; +} +static inline int cXEof(sqlite3_vtab_cursor *pCursor) { + return goVEof(((goVTabCursor*)pCursor)->vTabCursor); +} +static int cXColumn(sqlite3_vtab_cursor *pCursor, sqlite3_context *ctx, int i) { + char *pzErr = goVColumn(((goVTabCursor*)pCursor)->vTabCursor, ctx, i); + if (pzErr) { + return setErrMsg(pCursor, pzErr); + } + return SQLITE_OK; +} +static int 
cXRowid(sqlite3_vtab_cursor *pCursor, sqlite3_int64 *pRowid) { + char *pzErr = goVRowid(((goVTabCursor*)pCursor)->vTabCursor, pRowid); + if (pzErr) { + return setErrMsg(pCursor, pzErr); + } + return SQLITE_OK; +} + +static sqlite3_module goModule = { + 0, /* iVersion */ + cXCreate, /* xCreate - create a table */ + cXConnect, /* xConnect - connect to an existing table */ + cXBestIndex, /* xBestIndex - Determine search strategy */ + cXDisconnect, /* xDisconnect - Disconnect from a table */ + cXDestroy, /* xDestroy - Drop a table */ + cXOpen, /* xOpen - open a cursor */ + cXClose, /* xClose - close a cursor */ + cXFilter, /* xFilter - configure scan constraints */ + cXNext, /* xNext - advance a cursor */ + cXEof, /* xEof */ + cXColumn, /* xColumn - read data */ + cXRowid, /* xRowid - read data */ +// TODO + 0, /* xUpdate - write data */ + 0, /* xBegin - begin transaction */ + 0, /* xSync - sync transaction */ + 0, /* xCommit - commit transaction */ + 0, /* xRollback - rollback transaction */ + 0, /* xFindFunction - function overloading */ + 0, /* xRename - rename the table */ + 0, /* xSavepoint */ + 0, /* xRelease */ + 0 /* xRollbackTo */ +}; + + +int goSqlite3CreateModule(sqlite3 *db, const char *zName, void *pClientData) { + return sqlite3_create_module_v2(db, zName, &goModule, pClientData, goMDestroy); +} diff --git a/vendor/github.com/gwenn/gosqlite/vtab.go b/vendor/github.com/gwenn/gosqlite/vtab.go new file mode 100644 index 0000000..d597acc --- /dev/null +++ b/vendor/github.com/gwenn/gosqlite/vtab.go @@ -0,0 +1,308 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package sqlite + +/* +#include +#include + +int goSqlite3CreateModule(sqlite3 *db, const char *zName, void *pClientData); +*/ +import "C" + +import ( + "fmt" + "reflect" + "unsafe" +) + +type sqliteModule struct { + c *Conn + name string + module Module + vts map[*sqliteVTab]struct{} +} + +type sqliteVTab struct { + module *sqliteModule + vTab VTab + vtcs map[*sqliteVTabCursor]struct{} +} + +type sqliteVTabCursor struct { + vTab *sqliteVTab + vTabCursor VTabCursor +} + +//export goMInit +func goMInit(db, pClientData unsafe.Pointer, argc int, argv **C.char, pzErr **C.char, isCreate int) unsafe.Pointer { + m := (*sqliteModule)(pClientData) + if m.c.db != (*C.sqlite3)(db) { + *pzErr = mPrintf("%s", "Inconsistent db handles") + return nil + } + args := make([]string, argc) + var A []*C.char + slice := reflect.SliceHeader{Data: uintptr(unsafe.Pointer(argv)), Len: argc, Cap: argc} + a := reflect.NewAt(reflect.TypeOf(A), unsafe.Pointer(&slice)).Elem().Interface() + for i, s := range a.([]*C.char) { + args[i] = C.GoString(s) + } + var vTab VTab + var err error + if isCreate == 1 { + vTab, err = m.module.Create(m.c, args) + } else { + vTab, err = m.module.Connect(m.c, args) + } + + if err != nil { + *pzErr = mPrintf("%s", err.Error()) + return nil + } + vt := &sqliteVTab{m, vTab, nil} + // prevents 'vt' from being gced + if m.vts == nil { + m.vts = make(map[*sqliteVTab]struct{}) + } + m.vts[vt] = struct{}{} + *pzErr = nil + return unsafe.Pointer(vt) +} + +//export goVRelease +func goVRelease(pVTab unsafe.Pointer, isDestroy int) *C.char { + vt := (*sqliteVTab)(pVTab) + var err error + if isDestroy == 1 { + err = vt.vTab.Destroy() + } else { + err = vt.vTab.Disconnect() + } + if err != nil { + return mPrintf("%s", err.Error()) + } + // TODO Check vt.vtcs is empty + vt.vtcs = nil + delete(vt.module.vts, vt) + return nil +} 
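+
+// A minimal end-to-end sketch of implementing the Module/VTab/VTabCursor
+// interfaces declared below. Illustrative only: the names are invented and
+// Context.ResultInt64 is assumed to behave as in the scalar-function API.
+//
+//	type intArrayModule struct{ content []int64 }
+//
+//	func (m intArrayModule) Create(c *Conn, args []string) (VTab, error) {
+//		if err := c.DeclareVTab("CREATE TABLE x(i INTEGER)"); err != nil {
+//			return nil, err
+//		}
+//		return &intArrayVTab{m.content}, nil
+//	}
+//	func (m intArrayModule) Connect(c *Conn, args []string) (VTab, error) { return m.Create(c, args) }
+//	func (m intArrayModule) DestroyModule() {}
+//
+//	type intArrayVTab struct{ content []int64 }
+//
+//	func (t *intArrayVTab) BestIndex() error          { return nil }
+//	func (t *intArrayVTab) Disconnect() error         { return nil }
+//	func (t *intArrayVTab) Destroy() error            { return nil }
+//	func (t *intArrayVTab) Open() (VTabCursor, error) { return &intArrayCursor{t, 0}, nil }
+//
+//	type intArrayCursor struct {
+//		vt  *intArrayVTab
+//		row int
+//	}
+//
+//	func (c *intArrayCursor) Close() error  { return nil }
+//	func (c *intArrayCursor) Filter() error { c.row = 0; return nil }
+//	func (c *intArrayCursor) Next() error   { c.row++; return nil }
+//	func (c *intArrayCursor) EOF() bool     { return c.row >= len(c.vt.content) }
+//	func (c *intArrayCursor) Column(ctx *Context, col int) error {
+//		ctx.ResultInt64(c.vt.content[c.row])
+//		return nil
+//	}
+//	func (c *intArrayCursor) Rowid() (int64, error) { return int64(c.row), nil }
+//
+// Such a module would be registered with c.CreateModule("intarray", intArrayModule{...})
+// and queried after a CREATE VIRTUAL TABLE vt USING intarray statement.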
+ +//export goVOpen +func goVOpen(pVTab unsafe.Pointer, pzErr **C.char) unsafe.Pointer { + vt := (*sqliteVTab)(pVTab) + vTabCursor, err := vt.vTab.Open() + if err != nil { + *pzErr = mPrintf("%s", err.Error()) + return nil + } + // prevents 'vt' from being gced + vtc := &sqliteVTabCursor{vt, vTabCursor} + if vt.vtcs == nil { + vt.vtcs = make(map[*sqliteVTabCursor]struct{}) + } + vt.vtcs[vtc] = struct{}{} + *pzErr = nil + return unsafe.Pointer(vtc) +} + +//export goVClose +func goVClose(pCursor unsafe.Pointer) *C.char { + vtc := (*sqliteVTabCursor)(pCursor) + err := vtc.vTabCursor.Close() + if err != nil { + return mPrintf("%s", err.Error()) + } + delete(vtc.vTab.vtcs, vtc) + return nil +} + +//export goMDestroy +func goMDestroy(pClientData unsafe.Pointer) { + m := (*sqliteModule)(pClientData) + m.module.DestroyModule() + // TODO Check m.vts is empty + m.vts = nil + delete(m.c.modules, m.name) +} + +//export goVFilter +func goVFilter(pCursor unsafe.Pointer) *C.char { + vtc := (*sqliteVTabCursor)(pCursor) + err := vtc.vTabCursor.Filter() + if err != nil { + return mPrintf("%s", err.Error()) + } + return nil +} + +//export goVNext +func goVNext(pCursor unsafe.Pointer) *C.char { + vtc := (*sqliteVTabCursor)(pCursor) + err := vtc.vTabCursor.Next() + if err != nil { + return mPrintf("%s", err.Error()) + } + return nil +} + +//export goVEof +func goVEof(pCursor unsafe.Pointer) C.int { + vtc := (*sqliteVTabCursor)(pCursor) + return btocint(vtc.vTabCursor.EOF()) +} + +//export goVColumn +func goVColumn(pCursor, cp unsafe.Pointer, col int) *C.char { + vtc := (*sqliteVTabCursor)(pCursor) + c := (*Context)(cp) + err := vtc.vTabCursor.Column(c, col) + if err != nil { + return mPrintf("%s", err.Error()) + } + return nil +} + +//export goVRowid +func goVRowid(pCursor unsafe.Pointer, pRowid *C.sqlite3_int64) *C.char { + vtc := (*sqliteVTabCursor)(pCursor) + rowid, err := vtc.vTabCursor.Rowid() + if err != nil { + return mPrintf("%s", err.Error()) + } + *pRowid = C.sqlite3_int64(rowid) + return nil +} + +// Module is a "virtual table module", it defines the implementation of a virtual tables. +// (See http://sqlite.org/c3ref/module.html) +type Module interface { + Create(c *Conn, args []string) (VTab, error) // See http://sqlite.org/vtab.html#xcreate + Connect(c *Conn, args []string) (VTab, error) // See http://sqlite.org/vtab.html#xconnect + DestroyModule() // See http://sqlite.org/c3ref/create_module.html +} + +// VTab describes a particular instance of the virtual table. +// (See http://sqlite.org/c3ref/vtab.html) +type VTab interface { + BestIndex( /*sqlite3_index_info**/ ) error // See http://sqlite.org/vtab.html#xbestindex + Disconnect() error // See http://sqlite.org/vtab.html#xdisconnect + Destroy() error // See http://sqlite.org/vtab.html#sqlite3_module.xDestroy + Open() (VTabCursor, error) // See http://sqlite.org/vtab.html#xopen +} + +// VTabExtended lists optional/extended functions. +// (See http://sqlite.org/c3ref/vtab.html) +type VTabExtended interface { + VTab + Update( /*int argc, sqlite3_value **argv, */ rowid int64) error + + Begin() error + Sync() error + Commit() error + Rollback() error + + //FindFunction(nArg int, name string /*, void (**pxFunc)(sqlite3_context*,int,sqlite3_value**), void **ppArg*/) error + Rename(newName string) error + + Savepoint(i int) error + Release(i int) error + RollbackTo(i int) error +} + +// VTabCursor describes cursors that point into the virtual table and are used to loop through the virtual table. 
+// (See http://sqlite.org/c3ref/vtab_cursor.html) +type VTabCursor interface { + Close() error // See http://sqlite.org/vtab.html#xclose + Filter( /*idxNum int, idxStr string, int argc, sqlite3_value **argv*/ ) error // See http://sqlite.org/vtab.html#xfilter + Next() error // See http://sqlite.org/vtab.html#xnext + EOF() bool // See http://sqlite.org/vtab.html#xeof + // col is zero-based so the first column is numbered 0 + Column(c *Context, col int) error // See http://sqlite.org/vtab.html#xcolumn + Rowid() (int64, error) // See http://sqlite.org/vtab.html#xrowid +} + +// DeclareVTab declares the Schema of a virtual table. +// (See http://sqlite.org/c3ref/declare_vtab.html) +func (c *Conn) DeclareVTab(sql string) error { + zSQL := C.CString(sql) + defer C.free(unsafe.Pointer(zSQL)) + return c.error(C.sqlite3_declare_vtab(c.db, zSQL), fmt.Sprintf("Conn.DeclareVTab(%q)", sql)) +} + +// CreateModule registers a virtual table implementation. +// Cannot be used with Go >= 1.6 and cgocheck enabled. +// (See http://sqlite.org/c3ref/create_module.html) +func (c *Conn) CreateModule(moduleName string, module Module) error { + mname := C.CString(moduleName) + defer C.free(unsafe.Pointer(mname)) + // To make sure it is not gced, keep a reference in the connection. + udm := &sqliteModule{c, moduleName, module, nil} + if len(c.modules) == 0 { + c.modules = make(map[string]*sqliteModule) + } + c.modules[moduleName] = udm // FIXME What happens if different modules are registered with the same name? + return c.error(C.goSqlite3CreateModule(c.db, mname, unsafe.Pointer(udm)), + fmt.Sprintf("Conn.CreateModule(%q)", moduleName)) +} + +/* +GO C +CreateModule( int sqlite3_create_module_v2( + |- c *Conn |- sqlite3 *db + |- moduleName string |- const char *zName + |- goModule |- const sqlite3_module *p (~) Methods for the module + |- *sqliteModule |- void *pClientData () Client data for xCreate/xConnect + \- goVDestroy \- void(*xDestroy)(void*) () Client data destructor function +) ) + +goModule sqlite3_module { + |- int iVersion +goMInit |- int (*xCreate)(sqlite3*, void *pAux, int argc, char **argv, sqlite3_vtab **ppVTab, + char **pzErr) +goMInit |- int (*xConnect)(sqlite3*, void *pAux, int argc, char **argv, sqlite3_vtab **ppVTab, + char **pzErr) +x |- int (*xBestIndex)(sqlite3_vtab *pVTab, sqlite3_index_info*) +goVRelease |- int (*xDisconnect)(sqlite3_vtab *pVTab) +goVRelease |- int (*xDestroy)(sqlite3_vtab *pVTab) +goVOpen |- int (*xOpen)(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor) +goVClose |- int (*xClose)(sqlite3_vtab_cursor*) +x |- int (*xFilter)(sqlite3_vtab_cursor*, int idxNum, const char *idxStr, int argc, + sqlite3_value **argv) +x |- int (*xNext)(sqlite3_vtab_cursor*) +x |- int (*xEof)(sqlite3_vtab_cursor*) +x |- int (*xColumn)(sqlite3_vtab_cursor*, sqlite3_context*, int) +x |- int (*xRowid)(sqlite3_vtab_cursor*, sqlite_int64 *pRowid) +o |- int (*xUpdate)(sqlite3_vtab *, int, sqlite3_value **, sqlite_int64 *) +o |- int (*xBegin)(sqlite3_vtab *pVTab) +o |- int (*xSync)(sqlite3_vtab *pVTab) +o |- int (*xCommit)(sqlite3_vtab *pVTab) +o |- int (*xRollback)(sqlite3_vtab *pVTab) +o |- int (*xFindFunction)(sqlite3_vtab *pVtab, int nArg, const char *zName, + void (**pxFunc)(sqlite3_context*,int,sqlite3_value**), void **ppArg) +x |- int (*xRename)(sqlite3_vtab *pVtab, const char *zNew) +o |- int (*xSavepoint)(sqlite3_vtab *pVTab, int) +o |- int (*xRelease)(sqlite3_vtab *pVTab, int) +o \- int (*xRollbackTo)(sqlite3_vtab *pVTab, int) + } + +DeclareVTab int sqlite3_declare_vtab( (Called in 
xCreate/xConnect) + |- sqlite3 *db, + \- const char *zCreateTable + ) + +sqliteVTab sqlite3_vtab { (Created by xCreate/xConnect) + |- const sqlite3_module *pModule + |- int nRef + |- char *zErrMsg + \- ... + } + +sqliteVTabCursor sqlite3_vtab_cursor { (Created by xOpen) + |- sqlite3_vtab *pVtab + \- ... + } + +*/ diff --git a/vendor/github.com/gwenn/yacr/.gitignore b/vendor/github.com/gwenn/yacr/.gitignore new file mode 100644 index 0000000..f6e7e52 --- /dev/null +++ b/vendor/github.com/gwenn/yacr/.gitignore @@ -0,0 +1,10 @@ +*.[568ao] +[568a].out +_testmain.go +_obj +_test +*.swp +fuzz/corpus +fuzz/crashers +fuzz/suppressions +fuzz/csv-fuzz.zip \ No newline at end of file diff --git a/vendor/github.com/gwenn/yacr/.travis.yml b/vendor/github.com/gwenn/yacr/.travis.yml new file mode 100644 index 0000000..51ba691 --- /dev/null +++ b/vendor/github.com/gwenn/yacr/.travis.yml @@ -0,0 +1,5 @@ +language: go +sudo: false +go: + - 1.11.x +before_script: go get github.com/gwenn/yacr diff --git a/vendor/github.com/gwenn/yacr/LICENSE b/vendor/github.com/gwenn/yacr/LICENSE new file mode 100644 index 0000000..7448756 --- /dev/null +++ b/vendor/github.com/gwenn/yacr/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gwenn/yacr/README.md b/vendor/github.com/gwenn/yacr/README.md new file mode 100644 index 0000000..bff6dea --- /dev/null +++ b/vendor/github.com/gwenn/yacr/README.md @@ -0,0 +1,27 @@ +[![Build Status][1]][2] [![GoDoc](https://godoc.org/github.com/gwenn/yacr?status.svg)](https://godoc.org/github.com/gwenn/yacr) [![Go Report Card](https://goreportcard.com/badge/github.com/gwenn/yacr)](https://goreportcard.com/report/github.com/gwenn/yacr) [![Coverage Status](https://coveralls.io/repos/github/gwenn/yacr/badge.svg?branch=master)](https://coveralls.io/github/gwenn/yacr?branch=master) + +[1]: https://secure.travis-ci.org/gwenn/yacr.png +[2]: http://www.travis-ci.org/gwenn/yacr +Yet another CSV reader (and writer) with small memory usage. + +All credit goes to: +* Rob Pike, creator of Scanner interface, +* D. 
Richard Hipp, for his CSV parser implementation.
+
+There is a standard package named [encoding/csv](http://tip.golang.org/pkg/encoding/csv/).
+
+<pre>
+BenchmarkParsing	    5000	    381518 ns/op	 256.87 MB/s	    4288 B/op	       5 allocs/op
+BenchmarkQuotedParsing	    5000	    487599 ns/op	 209.19 MB/s	    4288 B/op	       5 allocs/op
+BenchmarkEmbeddedNL	    5000	    594618 ns/op	 201.81 MB/s	    4288 B/op	       5 allocs/op
+BenchmarkStdParser	     500	   5026100 ns/op	  23.88 MB/s	  625499 B/op	   16037 allocs/op
+BenchmarkYacrParser	    5000	    593165 ns/op	 202.30 MB/s	    4288 B/op	       5 allocs/op
+BenchmarkYacrWriter	  200000	      9433 ns/op	  98.05 MB/s	    2755 B/op	       0 allocs/op
+BenchmarkStdWriter	  100000	     27804 ns/op	  33.27 MB/s	    2755 B/op	       0 allocs/op
+</pre>
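+
+Example
+-------
+A minimal read loop (an illustrative sketch; the `os`, `log` and `fmt` imports
+and the two-column input are assumed):
+
+	r := yacr.DefaultReader(os.Stdin)
+	var name string
+	var count int
+	for {
+		n, err := r.ScanRecord(&name, &count)
+		if err != nil {
+			log.Fatal(err)
+		} else if n == 0 {
+			break // EOF
+		}
+		fmt.Println(name, count)
+	}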
+ +USAGES +------ +* [csvdiff](https://github.com/gwenn/csvdiff) +* [csvgrep](https://github.com/gwenn/csvgrep) +* [SQLite import/export/module](https://github.com/gwenn/gosqlite/blob/master/csv.go) diff --git a/vendor/github.com/gwenn/yacr/reader.go b/vendor/github.com/gwenn/yacr/reader.go new file mode 100644 index 0000000..7d6ebd9 --- /dev/null +++ b/vendor/github.com/gwenn/yacr/reader.go @@ -0,0 +1,472 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package yacr is yet another CSV reader (and writer) with small memory usage. +package yacr + +import ( + "bufio" + "bytes" + "encoding" + "fmt" + "io" + "reflect" + "strconv" +) + +// Reader provides an interface for reading CSV data +// (compatible with rfc4180 and extended with the option of having a separator other than ","). +// Successive calls to the Scan method will step through the 'fields', skipping the separator/newline between the fields. +// The EndOfRecord method tells when a field is terminated by a line break. +type Reader struct { + *bufio.Scanner + sep byte // values separator + quoted bool // specify if values may be quoted (when they contain separator or newline) + guess bool // try to guess separator based on the file header + eor bool // true when the most recent field has been terminated by a newline (not a separator). + lineno int // current line number (not record number) + + Trim bool // trim spaces (only on unquoted values). Break rfc4180 rule: "Spaces are considered part of a field and should not be ignored." + Comment byte // character marking the start of a line comment. When specified (not 0), line comment appears as empty line. + Lazy bool // specify if quoted values may contains unescaped quote not followed by a separator or a newline + + Headers map[string]int // Index (first is 1) by header +} + +// DefaultReader creates a "standard" CSV reader (separator is comma and quoted mode active) +func DefaultReader(rd io.Reader) *Reader { + return NewReader(rd, ',', true, false) +} + +// NewReader returns a new CSV scanner to read from r. +// When quoted is false, values must not contain a separator or newline. +func NewReader(r io.Reader, sep byte, quoted, guess bool) *Reader { + s := &Reader{bufio.NewScanner(r), sep, quoted, guess, true, 1, false, 0, false, nil} + s.Split(s.ScanField) + return s +} + +// ScanHeaders loads current line as the header line. +func (s *Reader) ScanHeaders() error { + s.Headers = make(map[string]int) + for i := 1; s.Scan(); i++ { + s.Headers[s.Text()] = i + if s.EndOfRecord() { + break + } + } + return s.Err() +} + +// ScanRecordByName decodes one line fields by name (name1, value1, ...). +// Specified names must match Headers. +func (s *Reader) ScanRecordByName(args ...interface{}) (int, error) { + if len(args)%2 != 0 { + return 0, fmt.Errorf("expected an even number of arguments: %d", len(args)) + } + values := make([]interface{}, len(s.Headers)) + for i := 0; i < len(args); i += 2 { + name, ok := args[i].(string) + if !ok { + return 0, fmt.Errorf("non-string field name at %d: %T", i, args[i]) + } + index, ok := s.Headers[name] + if !ok { + return 0, fmt.Errorf("unknown field name: %s", name) + } + values[index-1] = args[i+1] + } + return s.ScanRecord(values...) +} + +// ScanRecord decodes one line fields to values. +// Empty lines are ignored/skipped. +// It's like fmt.Scan or database.sql.Rows.Scan. 
+// Returns (0, nil) on EOF, (*, err) on error +// and (n >= 1, nil) on success (n may be less or greater than len(values)). +// var n int +// var err error +// for { +// values := make([]string, N) +// if n, err = s.ScanRecord(&values[0]/*, &values[1], ...*/); err != nil || n == 0 { +// break // or error handling +// } else if (n > N) { +// n = N // ignore extra values +// } +// for _, value := range values[0:n] { +// // ... +// } +// } +// if err != nil { +// // error handling +// } +func (s *Reader) ScanRecord(values ...interface{}) (int, error) { + for i, value := range values { + if !s.Scan() { + return i, s.Err() + } + if i == 0 { // skip empty line (or line comment) + for s.EndOfRecord() && len(s.Bytes()) == 0 { + if !s.Scan() { + return i, s.Err() + } + } + } + if err := s.value(value, true); err != nil { + return i + 1, err + } else if s.EndOfRecord() && i != len(values)-1 { + return i + 1, nil + } + } + if !s.EndOfRecord() { + i := len(values) + for ; !s.EndOfRecord(); i++ { // Consume extra fields + if !s.Scan() { + return i, s.Err() + } + } + return i, nil + } + return len(values), nil +} + +// ScanValue advances to the next token and decodes field's content to value. +// The value may point to data that will be overwritten by a subsequent call to Scan. +func (s *Reader) ScanValue(value interface{}) error { + if !s.Scan() { + return s.Err() + } + return s.value(value, false) +} + +// Value decodes field's content to value. +// The value may point to data that will be overwritten by a subsequent call to Scan. +func (s *Reader) Value(value interface{}) error { + return s.value(value, false) +} +func (s *Reader) value(value interface{}, copied bool) error { + var err error + switch value := value.(type) { + case nil: + case *string: + *value = s.Text() + case *int: + *value, err = strconv.Atoi(s.Text()) + case *int32: + var i int64 + i, err = strconv.ParseInt(s.Text(), 10, 32) + *value = int32(i) + case *int64: + *value, err = strconv.ParseInt(s.Text(), 10, 64) + case *bool: + *value, err = strconv.ParseBool(s.Text()) + case *float64: + *value, err = strconv.ParseFloat(s.Text(), 64) + case *[]byte: + if copied { + v := s.Bytes() + c := make([]byte, len(v)) + copy(c, v) + *value = c + } else { + *value = s.Bytes() + } + case encoding.TextUnmarshaler: + err = value.UnmarshalText(s.Bytes()) + default: + return s.scanReflect(value) + } + return err +} + +func (s *Reader) scanReflect(v interface{}) (err error) { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return fmt.Errorf("unsupported type %T", v) + } + dv := reflect.Indirect(rv) + switch dv.Kind() { + case reflect.String: + dv.SetString(s.Text()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + var i int64 + i, err = strconv.ParseInt(s.Text(), 10, dv.Type().Bits()) + if err == nil { + dv.SetInt(i) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + var i uint64 + i, err = strconv.ParseUint(s.Text(), 10, dv.Type().Bits()) + if err == nil { + dv.SetUint(i) + } + case reflect.Bool: + var b bool + b, err = strconv.ParseBool(s.Text()) + if err == nil { + dv.SetBool(b) + } + case reflect.Float32, reflect.Float64: + var f float64 + f, err = strconv.ParseFloat(s.Text(), dv.Type().Bits()) + if err == nil { + dv.SetFloat(f) + } + default: + return fmt.Errorf("unsupported type: %T", v) + } + return +} + +// LineNumber returns current line number (not record number) +func (s *Reader) LineNumber() int { + return s.lineno +} + +// 
EndOfRecord returns true when the most recent field has been terminated by a newline (not a separator). +func (s *Reader) EndOfRecord() bool { + return s.eor +} + +// Sep returns the values separator used/guessed +func (s *Reader) Sep() byte { + return s.sep +} + +// SkipRecords skips n records/headers +func (s *Reader) SkipRecords(n int) error { + i := 0 + for { + if i == n { + return nil + } + if !s.Scan() { + return s.Err() + } + if s.eor { + i++ + } + } +} + +// ScanField implements bufio.SplitFunc for CSV. +// Lexing is adapted from csv_read_one_field function in SQLite3 shell sources. +func (s *Reader) ScanField(data []byte, atEOF bool) (advance int, token []byte, err error) { + var a int + for { + a, token, err = s.scanField(data, atEOF) + advance += a + if err != nil || a == 0 || token != nil { + return + } + data = data[a:] + } +} + +func (s *Reader) scanField(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 && s.eor { + return 0, nil, nil + } + if s.guess { + s.guess = false + if b := guess(data); b > 0 { + s.sep = b + } + } + if s.quoted && len(data) > 0 && data[0] == '"' { // quoted field (may contains separator, newline and escaped quote) + startLineno := s.lineno + escapedQuotes := 0 + strict := true + var c, pc, ppc byte + // Scan until the separator or newline following the closing quote (and ignore escaped quote) + for i := 1; i < len(data); i++ { + c = data[i] + if c == '\n' { + s.lineno++ + } else if c == '"' { + if pc == c { // escaped quote + pc = 0 + escapedQuotes++ + continue + } + } + if pc == '"' && c == s.sep { + s.eor = false + return i + 1, unescapeQuotes(data[1:i-1], escapedQuotes, strict), nil + } else if pc == '"' && c == '\n' { + s.eor = true + return i + 1, unescapeQuotes(data[1:i-1], escapedQuotes, strict), nil + } else if c == '\n' && pc == '\r' && ppc == '"' { + s.eor = true + return i + 1, unescapeQuotes(data[1:i-2], escapedQuotes, strict), nil + } + if pc == '"' && c != '\r' { + if s.Lazy { + strict = false + } else { + return 0, nil, fmt.Errorf("unescaped %c character at line %d", pc, s.lineno) + } + } + ppc = pc + pc = c + } + if atEOF { + if c == '"' { + s.eor = true + return len(data), unescapeQuotes(data[1:len(data)-1], escapedQuotes, strict), nil + } + // If we're at EOF, we have a non-terminated field. + return 0, nil, fmt.Errorf("non-terminated quoted field between lines %d and %d", startLineno, s.lineno) + } + } else if s.eor && s.Comment != 0 && len(data) > 0 && data[0] == s.Comment { // line comment + for i, c := range data { + if c == '\n' { + s.lineno++ + return i + 1, nil, nil + } + } + if atEOF { + return len(data), nil, nil + } + } else { // unquoted field + // Scan until separator or newline, marking end of field. + for i, c := range data { + if c == s.sep { + s.eor = false + if s.Trim { + return i + 1, trim(data[0:i]), nil + } + return i + 1, data[0:i], nil + } else if c == '\n' { + s.lineno++ + if i > 0 && data[i-1] == '\r' { + s.eor = true + if s.Trim { + return i + 1, trim(data[0 : i-1]), nil + } + return i + 1, data[0 : i-1], nil + } + s.eor = true + if s.Trim { + return i + 1, trim(data[0:i]), nil + } + return i + 1, data[0:i], nil + } + } + // If we're at EOF, we have a final field. Return it. + if atEOF { + s.eor = true + if s.Trim { + return len(data), trim(data), nil + } + return len(data), data, nil + } + } + // Request more data. 
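+	// (Returning (0, nil, nil) asks the enclosing bufio.Scanner for more
+	// input; the Scanner will call this split function again with a larger buffer.)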
+ return 0, nil, nil +} + +func unescapeQuotes(b []byte, count int, strict bool) []byte { + if count == 0 { + return b + } + for i, j := 0, 0; i < len(b); i, j = i+1, j+1 { + b[j] = b[i] + if b[i] == '"' && (strict || i < len(b)-1 && b[i+1] == '"') { + i++ + } + } + return b[:len(b)-count] +} + +func guess(data []byte) byte { + seps := []byte{',', ';', '\t', '|', ':'} + count := make(map[byte]uint) + for _, b := range data { + if bytes.IndexByte(seps, b) >= 0 { + count[b]++ + /*} else if b == '\n' { + break*/ + } + } + var max uint + var sep byte + for b, c := range count { + if c > max { + max = c + sep = b + } + } + return sep +} + +// bytes.TrimSpace may return nil... +func trim(s []byte) []byte { + t := bytes.TrimSpace(s) + if t == nil { + return s[0:0] + } + return t +} + +// IsNumber determines if the current token is a number or not. +// Only works for single-byte encodings (ASCII, ISO-8859-1) and UTF-8. +func (s *Reader) IsNumber() (isNum bool, isReal bool) { + return IsNumber(s.Bytes()) +} + +// Only works for single-byte encodings (ASCII, ISO-8859-1) and UTF-8. +func isDigit(c byte) bool { + return c >= '0' && c <= '9' +} + +// IsNumber determines if the string is a number or not. +// Only works for single-byte encodings (ASCII, ISO-8859-1) and UTF-8. +func IsNumber(s []byte) (isNum bool, isReal bool) { + if len(s) == 0 { + return false, false + } + i := 0 + if s[i] == '-' || s[i] == '+' { // sign + i++ + } + // Nor Hexadecimal nor octal supported + digit := false + for ; len(s) != i && isDigit(s[i]); i++ { + digit = true + } + if len(s) == i { // integer "[-+]?\d*" + return digit, false + } + if s[i] == '.' { // real + for i++; len(s) != i && isDigit(s[i]); i++ { // digit(s) optional + digit = true + } + } + if len(s) == i { // real "[-+]?\d*\.\d*" + if digit { + return true, true + } + // "[-+]?\." is not a number + return false, false + } + if s[i] == 'e' || s[i] == 'E' { // exponent + i++ + if !digit || len(s) == i { // nor "[-+]?\.?e" nor "[-+]?\d*\.?\d*e" is a number + return false, false + } + if s[i] == '-' || s[i] == '+' { // sign + i++ + } + if len(s) == i || !isDigit(s[i]) { // one digit expected + return false, false + } + for i++; len(s) != i && isDigit(s[i]); i++ { + } + } + if len(s) == i { + return true, true + } + return false, false +} diff --git a/vendor/github.com/gwenn/yacr/util.go b/vendor/github.com/gwenn/yacr/util.go new file mode 100644 index 0000000..b78e4d4 --- /dev/null +++ b/vendor/github.com/gwenn/yacr/util.go @@ -0,0 +1,53 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package yacr + +import ( + "compress/bzip2" + "compress/gzip" + "io" + "io/ioutil" + "os" + "path" +) + +type zReadCloser struct { + f *os.File + rd io.ReadCloser +} + +// Zopen transparently opens gzip/bzip files (based on their extension). 
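+// A hypothetical call (the filename is illustrative):
+//
+//	rc, err := Zopen("data.csv.gz") // decompressed transparently
+//	if err != nil { /* handle error */ }
+//	defer rc.Close()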
+func Zopen(filepath string) (io.ReadCloser, error) { + f, err := os.Open(filepath) + if err != nil { + return nil, err + } + var rd io.ReadCloser + // TODO zip + ext := path.Ext(f.Name()) + if ext == ".gz" { + rd, err = gzip.NewReader(f) + if err != nil { + _ = f.Close() + return nil, err + } + } else if ext == ".bz2" { + rd = ioutil.NopCloser(bzip2.NewReader(f)) + } else { + rd = f + } + return &zReadCloser{f, rd}, nil +} +func (z *zReadCloser) Read(b []byte) (n int, err error) { + return z.rd.Read(b) +} +func (z *zReadCloser) Close() (err error) { + err = z.rd.Close() + if err != nil { + _ = z.f.Close() + return + } + return z.f.Close() +} diff --git a/vendor/github.com/gwenn/yacr/writer.go b/vendor/github.com/gwenn/yacr/writer.go new file mode 100644 index 0000000..3766ee8 --- /dev/null +++ b/vendor/github.com/gwenn/yacr/writer.go @@ -0,0 +1,212 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package yacr + +import ( + "bufio" + "encoding" + "errors" + "fmt" + "io" + "reflect" + "strconv" + "unsafe" +) + +// Writer provides an interface for writing CSV data +// (compatible with rfc4180 and extended with the option of having a separator other than ","). +// Successive calls to the Write method will automatically insert the separator. +// The EndOfRecord method tells when a line break is inserted. +type Writer struct { + b *bufio.Writer + sep byte // values separator + quoted bool // specify if values should be quoted (when they contain a separator, a double-quote or a newline) + sor bool // true at start of record + err error // sticky error. + bs []byte // byte slice used to write string with minimal/no alloc/copy + hb *reflect.SliceHeader // header of bs + + UseCRLF bool // True to use \r\n as the line terminator +} + +// DefaultWriter creates a "standard" CSV writer (separator is comma and quoted mode active) +func DefaultWriter(wr io.Writer) *Writer { + return NewWriter(wr, ',', true) +} + +// NewWriter returns a new CSV writer. +func NewWriter(w io.Writer, sep byte, quoted bool) *Writer { + wr := &Writer{b: bufio.NewWriter(w), sep: sep, quoted: quoted, sor: true} + wr.hb = (*reflect.SliceHeader)(unsafe.Pointer(&wr.bs)) + return wr +} + +// WriteRecord ensures that values are quoted when needed. +// It's like fmt.Println. +func (w *Writer) WriteRecord(values ...interface{}) bool { + for _, v := range values { + if !w.WriteValue(v) { + return false + } + } + w.EndOfRecord() + return w.err == nil +} + +// WriteValue ensures that value is quoted when needed. +// Value's type/kind is used to encode value to text. 
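+// Values of unsupported kinds record a sticky error (see Err) and are
+// written as an empty field.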
+func (w *Writer) WriteValue(value interface{}) bool { + switch value := value.(type) { + case nil: + return w.Write([]byte{}) + case string: + return w.WriteString(value) + case int: + return w.WriteString(strconv.Itoa(value)) + case int32: + return w.WriteString(strconv.FormatInt(int64(value), 10)) + case int64: + return w.WriteString(strconv.FormatInt(value, 10)) + case bool: + return w.WriteString(strconv.FormatBool(value)) + case float32: + return w.WriteString(strconv.FormatFloat(float64(value), 'f', -1, 32)) + case float64: + return w.WriteString(strconv.FormatFloat(value, 'f', -1, 64)) + case []byte: + return w.Write(value) + case encoding.TextMarshaler: // time.Time + if text, err := value.MarshalText(); err != nil { + w.setErr(err) + w.Write([]byte{}) // TODO Validate: write an empty field + return false + } else { + return w.Write(text) // please, ignore golint + } + default: + return w.writeReflect(value) + } +} + +// WriteReflect ensures that value is quoted when needed. +// Value's (reflect) Kind is used to encode value to text. +func (w *Writer) writeReflect(value interface{}) bool { + v := reflect.ValueOf(value) + switch v.Kind() { + case reflect.String: + return w.WriteString(v.String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return w.WriteString(strconv.FormatInt(v.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return w.WriteString(strconv.FormatUint(v.Uint(), 10)) + case reflect.Bool: + return w.WriteString(strconv.FormatBool(v.Bool())) + case reflect.Float32, reflect.Float64: + return w.WriteString(strconv.FormatFloat(v.Float(), 'f', -1, v.Type().Bits())) + default: + w.setErr(fmt.Errorf("unsupported type: %T, %v", value, value)) + w.Write([]byte{}) // TODO Validate: write an empty field + return false + } +} + +// WriteString ensures that value is quoted when needed. +func (w *Writer) WriteString(value string) bool { + // To avoid making a copy... + hs := (*reflect.StringHeader)(unsafe.Pointer(&value)) + w.hb.Data = hs.Data + w.hb.Len = hs.Len + w.hb.Cap = hs.Len + return w.Write(w.bs) +} + +var ( + // ErrNewLine is the error returned when a value contains a newline in unquoted mode. + ErrNewLine = errors.New("yacr.Writer: newline character in value") + // ErrSeparator is the error returned when a value contains a separator in unquoted mode. + ErrSeparator = errors.New("yacr.Writer: separator in value") +) + +// Write ensures that value is quoted when needed. +func (w *Writer) Write(value []byte) bool { + if w.err != nil { + return false + } + if !w.sor { + w.setErr(w.b.WriteByte(w.sep)) + } + // In quoted mode, value is enclosed between quotes if it contains sep, quote or \n. 
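+	// For example, with sep == ',': `a,b` becomes `"a,b"` and `a"b` becomes `"a""b"`.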
+ if w.quoted { + last := 0 + for i, c := range value { + switch c { + case '"', '\r', '\n', w.sep: + default: + continue + } + if last == 0 { + w.setErr(w.b.WriteByte('"')) + } + if _, err := w.b.Write(value[last : i+1]); err != nil { + w.setErr(err) + } + if c == '"' { + w.setErr(w.b.WriteByte(c)) // escaped with another double quote + } + last = i + 1 + } + if _, err := w.b.Write(value[last:]); err != nil { + w.setErr(err) + } + if last != 0 { + w.setErr(w.b.WriteByte('"')) + } + } else { + // check that value does not contain sep or \n + for _, c := range value { + switch c { + case '\n': + w.setErr(ErrNewLine) + return false + case w.sep: + w.setErr(ErrSeparator) + return false + default: + continue + } + } + if _, err := w.b.Write(value); err != nil { + w.setErr(err) + } + } + w.sor = false + return w.err == nil +} + +// EndOfRecord tells when a line break must be inserted. +func (w *Writer) EndOfRecord() { + if w.UseCRLF { + w.setErr(w.b.WriteByte('\r')) + } + w.setErr(w.b.WriteByte('\n')) + w.sor = true +} + +// Flush ensures the writer's buffer is flushed. +func (w *Writer) Flush() { + w.setErr(w.b.Flush()) +} + +// Err returns the first error that was encountered by the Writer. +func (w *Writer) Err() error { + return w.err +} + +// setErr records the first error encountered. +func (w *Writer) setErr(err error) { + if w.err == nil { + w.err = err + } +} diff --git a/vendor/github.com/hectane/go-nonblockingchan/.travis.yml b/vendor/github.com/hectane/go-nonblockingchan/.travis.yml new file mode 100644 index 0000000..f4f458a --- /dev/null +++ b/vendor/github.com/hectane/go-nonblockingchan/.travis.yml @@ -0,0 +1,5 @@ +language: go + +go: + - 1.5 + - tip diff --git a/vendor/github.com/hectane/go-nonblockingchan/LICENSE.txt b/vendor/github.com/hectane/go-nonblockingchan/LICENSE.txt new file mode 100644 index 0000000..fb72c83 --- /dev/null +++ b/vendor/github.com/hectane/go-nonblockingchan/LICENSE.txt @@ -0,0 +1,9 @@ +The MIT License (MIT) + +Copyright (c) 2015 Nathan Osman + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
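The yacr reader and writer defined above are symmetric: ScanRecord decodes one record into typed destinations, and WriteRecord encodes one record with automatic quoting. A minimal round-trip sketch (hedged: it assumes the package's DefaultReader constructor from its reader.go, which is outside this hunk; the data is illustrative):

```go
package main

import (
	"log"
	"os"
	"strings"

	"github.com/gwenn/yacr"
)

func main() {
	// Comma-separated, quoted mode on both sides.
	r := yacr.DefaultReader(strings.NewReader("alice,30\nbob,25\n"))
	w := yacr.DefaultWriter(os.Stdout)
	var name string
	var age int
	for {
		n, err := r.ScanRecord(&name, &age)
		if err != nil {
			log.Fatal(err)
		} else if n == 0 {
			break // (0, nil) signals EOF
		}
		// WriteRecord quotes fields only when they need it.
		w.WriteRecord(name, age+1)
	}
	w.Flush()
	if err := w.Err(); err != nil {
		log.Fatal(err)
	}
}
```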
diff --git a/vendor/github.com/hectane/go-nonblockingchan/README.md b/vendor/github.com/hectane/go-nonblockingchan/README.md new file mode 100644 index 0000000..a463000 --- /dev/null +++ b/vendor/github.com/hectane/go-nonblockingchan/README.md @@ -0,0 +1,36 @@ +## go-nonblockingchan + +[![Build Status](https://travis-ci.org/hectane/go-nonblockingchan.svg?branch=master)](https://travis-ci.org/hectane/go-nonblockingchan) +[![GoDoc](https://godoc.org/github.com/hectane/go-nonblockingchan?status.svg)](https://godoc.org/github.com/hectane/go-nonblockingchan) +[![MIT License](http://img.shields.io/badge/license-MIT-9370d8.svg?style=flat)](http://opensource.org/licenses/MIT) + +A special type that mimics the behavior of a channel but does not block when items are sent. + +### Features + +- Send items without ever worrying that the send will block +- Check how many items are waiting to be received +- Synchronized access to members - use it from any goroutine + +### Usage + +To use the package, add the following import: + + import "github.com/hectane/go-nonblockingchan" + +Use the `New()` function to create a new instance: + + c := nbc.New() + +To send an item on the channel, use the `Send` field: + + c.Send <- true + +Sending will always succeed immediately. The item will be added to an internal buffer until it is received: + + v, ok := <-c.Recv + if ok { + // value was received + } else { + // channel was closed + } diff --git a/vendor/github.com/hectane/go-nonblockingchan/chan.go b/vendor/github.com/hectane/go-nonblockingchan/chan.go new file mode 100644 index 0000000..3dfc650 --- /dev/null +++ b/vendor/github.com/hectane/go-nonblockingchan/chan.go @@ -0,0 +1,73 @@ +// Non-blocking channel for Go. +package nbc + +import ( + "container/list" + "sync" +) + +// Special type that mimics the behavior of a channel but does not block when +// items are sent. Items are stored internally until received. Closing the Send +// channel will cause the Recv channel to be closed after all items have been +// received. +type NonBlockingChan struct { + mutex sync.Mutex + Send chan<- interface{} + Recv <-chan interface{} + items *list.List +} + +// Loop for buffering items between the Send and Recv channels until the Send +// channel is closed. +func (n *NonBlockingChan) run(send <-chan interface{}, recv chan<- interface{}) { + for { + if send == nil && n.items.Len() == 0 { + close(recv) + break + } + var ( + recvChan chan<- interface{} + recvVal interface{} + ) + if n.items.Len() > 0 { + recvChan = recv + recvVal = n.items.Front().Value + } + select { + case i, ok := <-send: + if ok { + n.mutex.Lock() + n.items.PushBack(i) + n.mutex.Unlock() + } else { + send = nil + } + case recvChan <- recvVal: + n.mutex.Lock() + n.items.Remove(n.items.Front()) + n.mutex.Unlock() + } + } +} + +// Create a new non-blocking channel. +func New() *NonBlockingChan { + var ( + send = make(chan interface{}) + recv = make(chan interface{}) + n = &NonBlockingChan{ + Send: send, + Recv: recv, + items: list.New(), + } + ) + go n.run(send, recv) + return n +} + +// Retrieve the number of items waiting to be received. 
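+// Len is safe to call from any goroutine; the internal list is guarded by a
+// mutex.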
+func (n *NonBlockingChan) Len() int { + n.mutex.Lock() + defer n.mutex.Unlock() + return n.items.Len() +} diff --git a/vendor/github.com/hectane/hectane/LICENSE.txt b/vendor/github.com/hectane/hectane/LICENSE.txt new file mode 100644 index 0000000..fb72c83 --- /dev/null +++ b/vendor/github.com/hectane/hectane/LICENSE.txt @@ -0,0 +1,9 @@ +The MIT License (MIT) + +Copyright (c) 2015 Nathan Osman + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/hectane/hectane/email/attachment.go b/vendor/github.com/hectane/hectane/email/attachment.go new file mode 100644 index 0000000..001fa66 --- /dev/null +++ b/vendor/github.com/hectane/hectane/email/attachment.go @@ -0,0 +1,48 @@ +package email + +import ( + "fmt" + "mime/multipart" + "mime/quotedprintable" + "net/textproto" +) + +// Email attachment. The content of the attachment is provided either as a +// UTF-8 string or as a Base64-encoded string ("encoded" set to "true"). +type Attachment struct { + Filename string `json:"filename"` + ContentType string `json:"content_type"` + Content string `json:"content"` + Encoded bool `json:"encoded"` +} + +// Write the attachment to the specified multipart writer. +func (a Attachment) Write(w *multipart.Writer) error { + headers := make(textproto.MIMEHeader) + if len(a.Filename) != 0 { + headers.Add("Content-Type", fmt.Sprintf("%s; name=%s", a.ContentType, a.Filename)) + } else { + headers.Add("Content-Type", a.ContentType) + } + if a.Encoded { + headers.Add("Content-Transfer-Encoding", "base64") + } else { + headers.Add("Content-Transfer-Encoding", "quoted-printable") + } + p, err := w.CreatePart(headers) + if err != nil { + return err + } + if a.Encoded { + if _, err := p.Write([]byte(a.Content)); err != nil { + return err + } + } else { + q := quotedprintable.NewWriter(p) + if _, err := q.Write([]byte(a.Content)); err != nil { + return err + } + return q.Close() + } + return nil +} diff --git a/vendor/github.com/hectane/hectane/email/email.go b/vendor/github.com/hectane/hectane/email/email.go new file mode 100644 index 0000000..d626997 --- /dev/null +++ b/vendor/github.com/hectane/hectane/email/email.go @@ -0,0 +1,143 @@ +package email + +import ( + "github.com/hectane/hectane/queue" + "github.com/kennygrant/sanitize" + + "bytes" + "fmt" + "io" + "mime/multipart" + "net/mail" + "net/textproto" + "strings" + "time" +) + +// Abstract representation of an email. 
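+// A minimal literal (all values are illustrative):
+//
+//	e := &Email{
+//		From:    "sender@example.com",
+//		To:      []string{"rcpt@example.com"},
+//		Subject: "Hello",
+//		Text:    "Hello, world",
+//	}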
+type Email struct { + From string `json:"from"` + To []string `json:"to"` + Cc []string `json:"cc"` + Bcc []string `json:"bcc"` + Subject string `json:"subject"` + Headers Headers `json:"headers"` + Text string `json:"text"` + Html string `json:"html"` + Attachments []Attachment `json:"attachments"` +} + +// Write the headers for the email to the specified writer. +func (e *Email) writeHeaders(w io.Writer, id, boundary string) error { + headers := Headers{ + "Message-Id": fmt.Sprintf("<%s@hectane>", id), + "From": e.From, + "To": strings.Join(e.To, ", "), + "Subject": e.Subject, + "Date": time.Now().Format("Mon, 02 Jan 2006 15:04:05 -0700"), + "MIME-Version": "1.0", + "Content-Type": fmt.Sprintf("multipart/mixed; boundary=%s", boundary), + } + for k, v := range e.Headers { + headers[k] = v + } + if len(e.Cc) > 0 { + headers["Cc"] = strings.Join(e.Cc, ", ") + } + return headers.Write(w) +} + +// Write the body of the email to the specified writer. +func (e *Email) writeBody(w *multipart.Writer) error { + var ( + buff = &bytes.Buffer{} + altWriter = multipart.NewWriter(buff) + ) + p, err := w.CreatePart(textproto.MIMEHeader{ + "Content-Type": []string{ + fmt.Sprintf("multipart/alternative; boundary=%s", altWriter.Boundary()), + }, + }) + if err != nil { + return err + } + if e.Text == "" { + e.Text = sanitize.HTML(e.Html) + } + if e.Html == "" { + e.Html = toHTML(e.Text) + } + if err := (Attachment{ + ContentType: "text/plain; charset=utf-8", + Content: e.Text, + }.Write(altWriter)); err != nil { + return err + } + if err := (Attachment{ + ContentType: "text/html; charset=utf-8", + Content: e.Html, + }.Write(altWriter)); err != nil { + return err + } + if err := altWriter.Close(); err != nil { + return err + } + if _, err := io.Copy(p, buff); err != nil { + return err + } + return nil +} + +// Create an array of messages with the specified body. +func (e *Email) newMessages(s *queue.Storage, from, body string) ([]*queue.Message, error) { + addresses := append(append(e.To, e.Cc...), e.Bcc...) + m, err := GroupAddressesByHost(addresses) + if err != nil { + return nil, err + } + messages := make([]*queue.Message, 0, 1) + for h, to := range m { + msg := &queue.Message{ + Host: h, + From: from, + To: to, + } + if err := s.SaveMessage(msg, body); err != nil { + return nil, err + } + messages = append(messages, msg) + } + return messages, nil +} + +// Convert the email into an array of messages grouped by host suitable for +// delivery to the mail queue. +func (e *Email) Messages(s *queue.Storage) ([]*queue.Message, error) { + from, err := mail.ParseAddress(e.From) + if err != nil { + return nil, err + } + w, body, err := s.NewBody() + if err != nil { + return nil, err + } + mpWriter := multipart.NewWriter(w) + if err := e.writeHeaders(w, body, mpWriter.Boundary()); err != nil { + return nil, err + } + if err := e.writeBody(mpWriter); err != nil { + return nil, err + } + for _, a := range e.Attachments { + if err := a.Write(mpWriter); err != nil { + return nil, err + } + } + if err := mpWriter.Close(); err != nil { + return nil, err + } + if err := w.Close(); err != nil { + return nil, err + } + return e.newMessages(s, from.Address, body) +} diff --git a/vendor/github.com/hectane/hectane/email/headers.go b/vendor/github.com/hectane/hectane/email/headers.go new file mode 100644 index 0000000..04ceb53 --- /dev/null +++ b/vendor/github.com/hectane/hectane/email/headers.go @@ -0,0 +1,20 @@ +package email + +import ( + "fmt" + "io" +) + +// Map of email headers. 
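+// Write emits each entry as "Key: value\r\n" and terminates the block with a
+// blank line, e.g. Headers{"Reply-To": "a@example.com"} (an illustrative value).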
+type Headers map[string]string
+
+// Write the headers to the specified io.Writer.
+func (e Headers) Write(w io.Writer) error {
+	for k, v := range e {
+		if _, err := w.Write([]byte(fmt.Sprintf("%s: %s\r\n", k, v))); err != nil {
+			return err
+		}
+	}
+	_, err := w.Write([]byte("\r\n"))
+	return err
+}
diff --git a/vendor/github.com/hectane/hectane/email/util.go b/vendor/github.com/hectane/hectane/email/util.go
new file mode 100644
index 0000000..9ca4711
--- /dev/null
+++ b/vendor/github.com/hectane/hectane/email/util.go
@@ -0,0 +1,44 @@
+package email
+
+import (
+	"fmt"
+	"html"
+	"net/mail"
+	"regexp"
+	"strings"
+)
+
+// The link RegExp is adapted from http://stackoverflow.com/a/3809435/193619.
+var (
+	replaceCRLF  = regexp.MustCompile(`\r?\n`)
+	replaceLinks = regexp.MustCompile(`https?:\/\/[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\b(?:[-a-zA-Z0-9@:%_\+.~#?&//=]*)`)
+)
+
+// Group a list of email addresses by their host. An error will be returned if
+// any of the addresses are invalid.
+func GroupAddressesByHost(addrs []string) (map[string][]string, error) {
+	m := make(map[string][]string)
+	for _, addr := range addrs {
+		a, err := mail.ParseAddress(addr)
+		if err != nil {
+			return nil, err
+		}
+		parts := strings.Split(a.Address, "@")
+		if m[parts[1]] == nil {
+			m[parts[1]] = make([]string, 0, 1)
+		}
+		m[parts[1]] = append(m[parts[1]], a.Address)
+	}
+	return m, nil
+}
+
+// Convert the specified text to its HTML equivalent, preserving formatting
+// where possible and converting URLs to <a> elements.
+func toHTML(data string) string {
+	data = html.EscapeString(data)
+	data = replaceCRLF.ReplaceAllString(data, "<br>")
+	data = replaceLinks.ReplaceAllStringFunc(data, func(m string) string {
+		return fmt.Sprintf("<a href=\"%s\">%s</a>", m, m)
+	})
+	return data
+}
diff --git a/vendor/github.com/hectane/hectane/queue/config.go b/vendor/github.com/hectane/hectane/queue/config.go
new file mode 100644
index 0000000..0f99742
--- /dev/null
+++ b/vendor/github.com/hectane/hectane/queue/config.go
@@ -0,0 +1,7 @@
+package queue
+
+// Application configuration.
+type Config struct {
+	Directory              string `json:"directory"`
+	DisableSSLVerification bool   `json:"disable-ssl-verification"`
+}
diff --git a/vendor/github.com/hectane/hectane/queue/host.go b/vendor/github.com/hectane/hectane/queue/host.go
new file mode 100644
index 0000000..579205b
--- /dev/null
+++ b/vendor/github.com/hectane/hectane/queue/host.go
@@ -0,0 +1,296 @@
+package queue
+
+import (
+	"github.com/Sirupsen/logrus"
+	"github.com/hectane/go-nonblockingchan"
+
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/mail"
+	"net/smtp"
+	"net/textproto"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+)
+
+// Host status information.
+type HostStatus struct {
+	Active bool `json:"active"`
+	Length int  `json:"length"`
+}
+
+// Persistent connection to an SMTP host.
+type Host struct {
+	m            sync.Mutex
+	config       *Config
+	storage      *Storage
+	log          *logrus.Entry
+	host         string
+	newMessage   *nbc.NonBlockingChan
+	lastActivity time.Time
+	stop         chan bool
+}
+
+// Receive the next message in the queue. The host queue is considered
+// "inactive" while waiting for new messages to arrive. The current time is
+// recorded before entering the select{} block so that the Idle() method can
+// calculate the idle time.
+func (h *Host) receiveMessage() *Message {
+	h.m.Lock()
+	h.lastActivity = time.Now()
+	h.m.Unlock()
+	defer func() {
+		h.m.Lock()
+		h.lastActivity = time.Time{}
+		h.m.Unlock()
+	}()
+	for {
+		select {
+		case i := <-h.newMessage.Recv:
+			return i.(*Message)
+		case <-h.stop:
+			return nil
+		}
+	}
+}
+
+// Parse an email address and extract the hostname.
+func (h *Host) parseHostname(addr string) (string, error) {
+	a, err := mail.ParseAddress(addr)
+	if err != nil {
+		return "", err
+	}
+	return strings.Split(a.Address, "@")[1], nil
+}
+
+// Attempt to connect to the specified server. The connection attempt is
+// performed in a separate goroutine, allowing it to be aborted if the host
+// queue is shut down.
+func (h *Host) tryMailServer(server, hostname string) (*smtp.Client, error) {
+	var (
+		c    *smtp.Client
+		err  error
+		done = make(chan bool)
+	)
+	go func() {
+		c, err = smtp.Dial(fmt.Sprintf("%s:25", server))
+		close(done)
+	}()
+	select {
+	case <-done:
+	case <-h.stop:
+		return nil, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+	if err := c.Hello(hostname); err != nil {
+		return nil, err
+	}
+	if ok, _ := c.Extension("STARTTLS"); ok {
+		config := &tls.Config{ServerName: server}
+		if h.config.DisableSSLVerification {
+			config.InsecureSkipVerify = true
+		}
+		if err := c.StartTLS(config); err != nil {
+			return nil, err
+		}
+	}
+	return c, nil
+}
+
+// Attempt to find the mail servers for the specified host. MX records are
+// checked first. If one or more were found, the records are converted into an
+// array of strings (sorted by priority). If none were found, the original host
+// is returned.
+func (h *Host) findMailServers(host string) []string {
+	r, err := net.LookupMX(host)
+	if err != nil {
+		return []string{host}
+	}
+	servers := make([]string, len(r))
+	for i, r := range r {
+		servers[i] = strings.TrimSuffix(r.Host, ".")
+	}
+	return servers
+}
+
+// Attempt to connect to one of the mail servers.
+func (h *Host) connectToMailServer(hostname string) (*smtp.Client, error) { + for _, s := range h.findMailServers(h.host) { + c, err := h.tryMailServer(s, hostname) + if err != nil { + h.log.Debugf("unable to connect to %s", s) + continue + } + return c, nil + } + return nil, errors.New("unable to connect to a mail server") +} + +// Attempt to send the specified message to the specified client. +func (h *Host) deliverToMailServer(c *smtp.Client, m *Message) error { + r, err := h.storage.GetMessageBody(m) + if err != nil { + return err + } + defer r.Close() + if err := c.Mail(m.From); err != nil { + return err + } + for _, t := range m.To { + if err := c.Rcpt(t); err != nil { + return err + } + } + w, err := c.Data() + if err != nil { + return err + } + defer w.Close() + if _, err := io.Copy(w, r); err != nil { + return err + } + return nil +} + +// Receive message and deliver them to their recipients. Due to the complicated +// algorithm for message delivery, the body of the method is broken up into a +// sequence of labeled sections. +func (h *Host) run() { + defer close(h.stop) + var ( + m *Message + hostname string + c *smtp.Client + err error + tries int + duration = time.Minute + ) +receive: + if m == nil { + m = h.receiveMessage() + if m == nil { + goto shutdown + } + h.log.Info("message received in queue") + } + hostname, err = h.parseHostname(m.From) + if err != nil { + h.log.Error(err.Error()) + goto cleanup + } +deliver: + if c == nil { + h.log.Debug("connecting to mail server") + c, err = h.connectToMailServer(hostname) + if c == nil { + if err != nil { + h.log.Error(err) + goto wait + } else { + goto shutdown + } + } + h.log.Debug("connection established") + } + err = h.deliverToMailServer(c, m) + if err != nil { + h.log.Error(err) + if _, ok := err.(syscall.Errno); ok { + c = nil + goto deliver + } + if e, ok := err.(*textproto.Error); ok { + if e.Code >= 400 && e.Code <= 499 { + c.Close() + c = nil + goto wait + } + c.Reset() + } + h.log.Error(err.Error()) + goto cleanup + } + h.log.Info("message delivered successfully") +cleanup: + h.log.Debug("deleting message from disk") + err = h.storage.DeleteMessage(m) + if err != nil { + h.log.Error(err.Error()) + } + m = nil + tries = 0 + goto receive +wait: + // We differ a tiny bit from the RFC spec here but this should work well + // enough - the goal is to retry lots of times early on and space out the + // remaining attempts as time goes on. (Roughly 48 hours total.) + switch { + case tries < 8: + duration *= 2 + case tries < 18: + default: + h.log.Error("maximum retry count exceeded") + goto cleanup + } + select { + case <-h.stop: + case <-time.After(duration): + goto receive + } + tries++ +shutdown: + h.log.Debug("shutting down") + if c != nil { + c.Close() + } +} + +// Create a new host connection. +func NewHost(host string, s *Storage, c *Config) *Host { + h := &Host{ + config: c, + storage: s, + log: logrus.WithField("context", host), + host: host, + newMessage: nbc.New(), + stop: make(chan bool), + } + go h.run() + return h +} + +// Attempt to deliver a message to the host. +func (h *Host) Deliver(m *Message) { + h.newMessage.Send <- m +} + +// Retrieve the connection idle time. +func (h *Host) Idle() time.Duration { + h.m.Lock() + defer h.m.Unlock() + if h.lastActivity.IsZero() { + return 0 + } + return time.Since(h.lastActivity) +} + +// Return the status of the host connection. 
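+// The host is reported as active whenever a message is being processed, that
+// is, while Idle() == 0.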
+func (h *Host) Status() *HostStatus { + return &HostStatus{ + Active: h.Idle() == 0, + Length: h.newMessage.Len(), + } +} + +// Close the connection to the host. +func (h *Host) Stop() { + h.stop <- true + <-h.stop +} diff --git a/vendor/github.com/hectane/hectane/queue/queue.go b/vendor/github.com/hectane/hectane/queue/queue.go new file mode 100644 index 0000000..5504003 --- /dev/null +++ b/vendor/github.com/hectane/hectane/queue/queue.go @@ -0,0 +1,127 @@ +package queue + +import ( + "github.com/Sirupsen/logrus" + + "time" +) + +// Queue status information. +type QueueStatus struct { + Uptime int `json:"uptime"` + Hosts map[string]*HostStatus `json:"hosts"` +} + +// Mail queue managing the sending of messages to hosts. +type Queue struct { + config *Config + Storage *Storage + log *logrus.Entry + hosts map[string]*Host + newMessage chan *Message + getStats chan chan *QueueStatus + stop chan bool +} + +// Deliver the specified message to the appropriate host queue. +func (q *Queue) deliverMessage(m *Message) { + if _, ok := q.hosts[m.Host]; !ok { + q.hosts[m.Host] = NewHost(m.Host, q.Storage, q.config) + } + q.hosts[m.Host].Deliver(m) +} + +// Generate stats for the queue. This is done by obtaining the information +// asynchronously and delivering it on the supplied channel when available. +func (q *Queue) stats(c chan *QueueStatus, startTime time.Time) { + go func() { + s := &QueueStatus{ + Uptime: int(time.Now().Sub(startTime) / time.Second), + Hosts: map[string]*HostStatus{}, + } + for n, h := range q.hosts { + s.Hosts[n] = h.Status() + } + c <- s + close(c) + }() +} + +// Check for inactive host queues and shut them down. +func (q *Queue) checkForInactiveQueues() { + for n, h := range q.hosts { + if h.Idle() > time.Minute { + h.Stop() + delete(q.hosts, n) + } + } +} + +// Receive new messages and deliver them to the specified host queue. Check for +// idle queues every so often and shut them down if they haven't been used. +func (q *Queue) run() { + defer close(q.stop) + startTime := time.Now() + ticker := time.NewTicker(time.Minute) + defer ticker.Stop() +loop: + for { + select { + case m := <-q.newMessage: + q.deliverMessage(m) + case c := <-q.getStats: + q.stats(c, startTime) + case <-ticker.C: + q.checkForInactiveQueues() + case <-q.stop: + break loop + } + } + q.log.Info("stopping host queues") + for h := range q.hosts { + q.hosts[h].Stop() + } + q.log.Info("shutting down") +} + +// Create a new message queue. Any undelivered messages on disk will be added +// to the appropriate queue. +func NewQueue(c *Config) (*Queue, error) { + q := &Queue{ + config: c, + Storage: NewStorage(c.Directory), + log: logrus.WithField("context", "Queue"), + hosts: make(map[string]*Host), + newMessage: make(chan *Message), + getStats: make(chan chan *QueueStatus), + stop: make(chan bool), + } + messages, err := q.Storage.LoadMessages() + if err != nil { + return nil, err + } + q.log.Infof("loaded %d message(s) from %s", len(messages), c.Directory) + for _, m := range messages { + q.deliverMessage(m) + } + go q.run() + return q, nil +} + +// Provide the status of each host queue. +func (q *Queue) Status() *QueueStatus { + c := make(chan *QueueStatus) + q.getStats <- c + return <-c +} + +// Deliver the specified message to the appropriate host queue. +func (q *Queue) Deliver(m *Message) { + q.newMessage <- m +} + +// Stop all active host queues. 
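+// Stop blocks until the queue goroutine has finished shutting down.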
+func (q *Queue) Stop() { + q.stop <- true + <-q.stop +} diff --git a/vendor/github.com/hectane/hectane/queue/storage.go b/vendor/github.com/hectane/hectane/queue/storage.go new file mode 100644 index 0000000..d1aa414 --- /dev/null +++ b/vendor/github.com/hectane/hectane/queue/storage.go @@ -0,0 +1,160 @@ +package queue + +import ( + "github.com/pborman/uuid" + + "encoding/json" + "io" + "io/ioutil" + "os" + "path" + "strings" + "sync" +) + +const ( + bodyFilename = "body" + messageExtension = ".message" +) + +// Message metadata. +type Message struct { + id string + body string + Host string + From string + To []string +} + +// Manager for message metadata and body on disk. All methods are safe to call +// from multiple goroutines. +type Storage struct { + m sync.Mutex + directory string +} + +// Determine the path to the directory containing the specified body. +func (s *Storage) bodyDirectory(body string) string { + return path.Join(s.directory, body) +} + +// Determine the filename of the specified body. +func (s *Storage) bodyFilename(body string) string { + return path.Join(s.bodyDirectory(body), bodyFilename) +} + +// Determine the filename of the specified message. +func (s *Storage) messageFilename(m *Message) string { + return path.Join(s.directory, m.body, m.id) + messageExtension +} + +// Load all messages with the specified body. +func (s *Storage) loadMessages(body string) []*Message { + messages := make([]*Message, 0, 1) + if files, err := ioutil.ReadDir(s.bodyDirectory(body)); err == nil { + for _, f := range files { + if strings.HasSuffix(f.Name(), messageExtension) { + m := &Message{ + id: strings.TrimSuffix(f.Name(), messageExtension), + body: body, + } + if r, err := os.Open(s.messageFilename(m)); err == nil { + if err := json.NewDecoder(r).Decode(m); err == nil { + messages = append(messages, m) + } + r.Close() + } + } + } + } + return messages +} + +// Create a Storage instance for the specified directory. +func NewStorage(directory string) *Storage { + return &Storage{ + directory: directory, + } +} + +// Create a new message body. The writer must be closed after writing the +// message body. +func (s *Storage) NewBody() (io.WriteCloser, string, error) { + body := uuid.New() + if err := os.MkdirAll(s.bodyDirectory(body), 0700); err != nil { + return nil, "", err + } + w, err := os.OpenFile(s.bodyFilename(body), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return nil, "", err + } + return w, body, nil +} + +// Load messages from the storage directory. Any messages that could not be +// loaded are ignored. +func (s *Storage) LoadMessages() ([]*Message, error) { + directories, err := ioutil.ReadDir(s.directory) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + return []*Message{}, nil + } + var messages []*Message + for _, d := range directories { + if d.IsDir() { + if _, err := os.Stat(s.bodyFilename(d.Name())); err == nil { + messages = append(messages, s.loadMessages(d.Name())...) + } + } + } + return messages, nil +} + +// Save the specified message to disk. +func (s *Storage) SaveMessage(m *Message, body string) error { + s.m.Lock() + defer s.m.Unlock() + m.id = uuid.New() + m.body = body + w, err := os.OpenFile(s.messageFilename(m), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + defer w.Close() + if err := json.NewEncoder(w).Encode(m); err != nil { + return err + } + return nil +} + +// Retreive a reader for the message body. 
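+// The caller is responsible for closing the returned io.ReadCloser.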
+func (s *Storage) GetMessageBody(m *Message) (io.ReadCloser, error) { + s.m.Lock() + defer s.m.Unlock() + return os.Open(s.bodyFilename(m.body)) +} + +// Delete the specified message. The message body is also deleted if no more +// messages exist. +func (s *Storage) DeleteMessage(m *Message) error { + s.m.Lock() + defer s.m.Unlock() + if err := os.Remove(s.messageFilename(m)); err != nil { + return err + } + d, err := os.Open(s.bodyDirectory(m.body)) + if err != nil { + return err + } + defer d.Close() + e, err := d.Readdir(2) + if err != nil { + return err + } + if len(e) == 1 { + return os.RemoveAll(s.bodyDirectory(m.body)) + } + return nil +} diff --git a/vendor/github.com/jackc/pgx/.gitignore b/vendor/github.com/jackc/pgx/.gitignore new file mode 100644 index 0000000..cb0cd90 --- /dev/null +++ b/vendor/github.com/jackc/pgx/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +conn_config_test.go diff --git a/vendor/github.com/jackc/pgx/.travis.yml b/vendor/github.com/jackc/pgx/.travis.yml new file mode 100644 index 0000000..d9ea43b --- /dev/null +++ b/vendor/github.com/jackc/pgx/.travis.yml @@ -0,0 +1,60 @@ +language: go + +go: + - 1.7.4 + - 1.6.4 + - tip + +# Derived from https://github.com/lib/pq/blob/master/.travis.yml +before_install: + - sudo apt-get remove -y --purge postgresql libpq-dev libpq5 postgresql-client-common postgresql-common + - sudo rm -rf /var/lib/postgresql + - wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - + - sudo sh -c "echo deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main $PGVERSION >> /etc/apt/sources.list.d/postgresql.list" + - sudo apt-get update -qq + - sudo apt-get -y -o Dpkg::Options::=--force-confdef -o Dpkg::Options::="--force-confnew" install postgresql-$PGVERSION postgresql-server-dev-$PGVERSION postgresql-contrib-$PGVERSION + - sudo chmod 777 /etc/postgresql/$PGVERSION/main/pg_hba.conf + - echo "local all postgres trust" > /etc/postgresql/$PGVERSION/main/pg_hba.conf + - echo "local all all trust" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf + - echo "host all pgx_md5 127.0.0.1/32 md5" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf + - echo "host all pgx_pw 127.0.0.1/32 password" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf + - echo "hostssl all pgx_ssl 127.0.0.1/32 md5" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf + - echo "host replication pgx_replication 127.0.0.1/32 md5" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf + - echo "host pgx_test pgx_replication 127.0.0.1/32 md5" >> /etc/postgresql/$PGVERSION/main/pg_hba.conf + - sudo chmod 777 /etc/postgresql/$PGVERSION/main/postgresql.conf + - "[[ $PGVERSION < 9.6 ]] || echo \"wal_level='logical'\" >> /etc/postgresql/$PGVERSION/main/postgresql.conf" + - "[[ $PGVERSION < 9.6 ]] || echo \"max_wal_senders=5\" >> /etc/postgresql/$PGVERSION/main/postgresql.conf" + - "[[ $PGVERSION < 9.6 ]] || echo \"max_replication_slots=5\" >> /etc/postgresql/$PGVERSION/main/postgresql.conf" + - sudo /etc/init.d/postgresql restart + +env: + matrix: + - PGVERSION=9.6 + - PGVERSION=9.5 + - PGVERSION=9.4 + - PGVERSION=9.3 + - PGVERSION=9.2 + +# The tricky test user, below, has to actually exist so that it can be used in a test +# of aclitem formatting. 
It turns out aclitems cannot contain non-existing users/roles. +before_script: + - mv conn_config_test.go.travis conn_config_test.go + - psql -U postgres -c 'create database pgx_test' + - "[[ \"${PGVERSION}\" = '9.0' ]] && psql -U postgres -f /usr/share/postgresql/9.0/contrib/hstore.sql pgx_test || psql -U postgres pgx_test -c 'create extension hstore'" + - psql -U postgres -c "create user pgx_ssl SUPERUSER PASSWORD 'secret'" + - psql -U postgres -c "create user pgx_md5 SUPERUSER PASSWORD 'secret'" + - psql -U postgres -c "create user pgx_pw SUPERUSER PASSWORD 'secret'" + - psql -U postgres -c "create user pgx_replication with replication password 'secret'" + - psql -U postgres -c "create user \" tricky, ' } \"\" \\ test user \" superuser password 'secret'" + +install: + - go get -u github.com/shopspring/decimal + - go get -u gopkg.in/inconshreveable/log15.v2 + - go get -u github.com/jackc/fake + +script: + - go test -v -race -short ./... + +matrix: + allow_failures: + - go: tip diff --git a/vendor/github.com/jackc/pgx/CHANGELOG.md b/vendor/github.com/jackc/pgx/CHANGELOG.md new file mode 100644 index 0000000..2fa56bb --- /dev/null +++ b/vendor/github.com/jackc/pgx/CHANGELOG.md @@ -0,0 +1,184 @@ +# 2.11.0 (June 5, 2017) + +## Fixes + +* Fix race with concurrent execution of stdlib.OpenFromConnPool (Terin Stock) + +## Features + +* .pgpass support (j7b) +* Add missing CopyFrom delegators to Tx and ConnPool (Jack Christensen) +* Add ParseConnectionString (James Lawrence) + +## Performance + +* Optimize HStore encoding (René Kroon) + +# 2.10.0 (March 17, 2017) + +## Fixes + +* Oid underlying type changed to uint32, previously it was incorrectly int32 (Manni Wood) +* Explicitly close checked-in connections on ConnPool.Reset, previously they were closed by GC + +## Features + +* Add xid type support (Manni Wood) +* Add cid type support (Manni Wood) +* Add tid type support (Manni Wood) +* Add "char" type support (Manni Wood) +* Add NullOid type (Manni Wood) +* Add json/jsonb binary support to allow use with CopyTo +* Add named error ErrAcquireTimeout (Alexander Staubo) +* Add logical replication decoding (Kris Wehner) +* Add PgxScanner interface to allow types to simultaneously support database/sql and pgx (Jack Christensen) +* Add CopyFrom with schema support (Jack Christensen) + +## Compatibility + +* jsonb now defaults to binary format. This means passing a []byte to a jsonb column will no longer work. +* CopyTo is now deprecated but will continue to work. 
+ +# 2.9.0 (August 26, 2016) + +## Fixes + +* Fix *ConnPool.Deallocate() not deleting prepared statement from map +* Fix stdlib not logging unprepared query SQL (Krzysztof Dryś) +* Fix Rows.Values() with varchar binary format +* Concurrent ConnPool.Acquire calls with Dialer timeouts now timeout in the expected amount of time (Konstantin Dzreev) + +## Features + +* Add CopyTo +* Add PrepareEx +* Add basic record to []interface{} decoding +* Encode and decode between all Go and PostgreSQL integer types with bounds checking +* Decode inet/cidr to net.IP +* Encode/decode [][]byte to/from bytea[] +* Encode/decode named types whose underlying types are string, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64 + +## Performance + +* Substantial reduction in memory allocations + +# 2.8.1 (March 24, 2016) + +## Features + +* Scan accepts nil argument to ignore a column + +## Fixes + +* Fix compilation on 32-bit architecture +* Fix Tx.status not being set on error on Commit +* Fix Listen/Unlisten with special characters + +# 2.8.0 (March 18, 2016) + +## Fixes + +* Fix unrecognized commit failure +* Fix msgReader.rxMsg bug when msgReader already has error +* Go float64 can no longer be encoded to a PostgreSQL float4 +* Fix connection corruption when query with error is closed early + +## Features + +This release adds multiple extension points helpful when wrapping pgx with +custom application behavior. pgx can now use custom types designed for the +standard database/sql package such as +[github.com/shopspring/decimal](https://github.com/shopspring/decimal). + +* Add *Tx.AfterClose() hook +* Add *Tx.Conn() +* Add *Tx.Status() +* Add *Tx.Err() +* Add *Rows.AfterClose() hook +* Add *Rows.Conn() +* Add *Conn.SetLogger() to allow changing logger +* Add *Conn.SetLogLevel() to allow changing log level +* Add ConnPool.Reset method +* Add support for database/sql.Scanner and database/sql/driver.Valuer interfaces +* Rows.Scan errors now include which argument caused error +* Add Encode() to allow custom Encoders to reuse internal encoding functionality +* Add Decode() to allow customer Decoders to reuse internal decoding functionality +* Add ConnPool.Prepare method +* Add ConnPool.Deallocate method +* Add Scan to uint32 and uint64 (utrack) +* Add encode and decode to []uint16, []uint32, and []uint64 (Max Musatov) + +## Performance + +* []byte skips encoding/decoding + +# 2.7.1 (October 26, 2015) + +* Disable SSL renegotiation + +# 2.7.0 (October 16, 2015) + +* Add RuntimeParams to ConnConfig +* ParseURI extracts RuntimeParams +* ParseDSN extracts RuntimeParams +* ParseEnvLibpq extracts PGAPPNAME +* Prepare is now idempotent +* Rows.Values now supports oid type +* ConnPool.Release automatically unlistens connections (Joseph Glanville) +* Add trace log level +* Add more efficient log leveling +* Retry automatically on ConnPool.Begin (Joseph Glanville) +* Encode from net.IP to inet and cidr +* Generalize encoding pointer to string to any PostgreSQL type +* Add UUID encoding from pointer to string (Joseph Glanville) +* Add null mapping to pointer to pointer (Jonathan Rudenberg) +* Add JSON and JSONB type support (Joseph Glanville) + +# 2.6.0 (September 3, 2015) + +* Add inet and cidr type support +* Add binary decoding to TimestampOid in stdlib driver (Samuel Stauffer) +* Add support for specifying sslmode in connection strings (Rick Snyder) +* Allow ConnPool to have MaxConnections of 1 +* Add basic PGSSLMODE to support to ParseEnvLibpq +* Add fallback TLS config +* Expose specific error for TSL 
refused +* More error details exposed in PgError +* Support custom dialer (Lewis Marshall) + +# 2.5.0 (April 15, 2015) + +* Fix stdlib nil support (Blaž Hrastnik) +* Support custom Scanner not reading entire value +* Fix empty array scanning (Laurent Debacker) +* Add ParseDSN (deoxxa) +* Add timestamp support to NullTime +* Remove unused text format scanners +* Return error when too many parameters on Prepare +* Add Travis CI integration (Jonathan Rudenberg) +* Large object support (Jonathan Rudenberg) +* Fix reading null byte arrays (Karl Seguin) +* Add timestamptz[] support +* Add timestamp[] support (Karl Seguin) +* Add bool[] support (Karl Seguin) +* Allow writing []byte into text and varchar columns without type conversion (Hari Bhaskaran) +* Fix ConnPool Close panic +* Add Listen / notify example +* Reduce memory allocations (Karl Seguin) + +# 2.4.0 (October 3, 2014) + +* Add per connection oid to name map +* Add Hstore support (Andy Walker) +* Move introductory docs to godoc from readme +* Fix documentation references to TextEncoder and BinaryEncoder +* Add keep-alive to TCP connections (Andy Walker) +* Add support for EmptyQueryResponse / Allow no-op Exec (Andy Walker) +* Allow reading any type into []byte +* WaitForNotification detects lost connections quicker + +# 2.3.0 (September 16, 2014) + +* Truncate logged strings and byte slices +* Extract more error information from PostgreSQL +* Fix data race with Rows and ConnPool diff --git a/vendor/github.com/jackc/pgx/LICENSE b/vendor/github.com/jackc/pgx/LICENSE new file mode 100644 index 0000000..7dee3da --- /dev/null +++ b/vendor/github.com/jackc/pgx/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 Jack Christensen + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/jackc/pgx/README.md b/vendor/github.com/jackc/pgx/README.md new file mode 100644 index 0000000..0061c7f --- /dev/null +++ b/vendor/github.com/jackc/pgx/README.md @@ -0,0 +1,125 @@ +[![](https://godoc.org/github.com/jackc/pgx?status.svg)](https://godoc.org/github.com/jackc/pgx) + +# Pgx + +Pgx is a pure Go database connection library designed specifically for +PostgreSQL. Pgx is different from other drivers such as +[pq](http://godoc.org/github.com/lib/pq) because, while it can operate as a +database/sql compatible driver, pgx is primarily intended to be used directly. +It offers a native interface similar to database/sql that offers better +performance and more features. 
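+
+A rough sketch of the native interface (a hedged example: the connection
+parameters are placeholders, not defaults, and error handling is abbreviated):
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/jackc/pgx"
+)
+
+func main() {
+	conn, err := pgx.Connect(pgx.ConnConfig{
+		Host:     "127.0.0.1",
+		Database: "pgx_test",
+		User:     "pgx_md5",
+		Password: "secret",
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer conn.Close()
+
+	var greeting string
+	// QueryRow/Scan mirror database/sql, without the driver indirection.
+	if err := conn.QueryRow("select 'Hello, world!'").Scan(&greeting); err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(greeting)
+}
+```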
+ +## Features + +Pgx supports many additional features beyond what is available through database/sql. + +* Listen / notify +* Transaction isolation level control +* Full TLS connection control +* Binary format support for custom types (can be much faster) +* Copy from protocol support for faster bulk data loads +* Logging support +* Configurable connection pool with after connect hooks to do arbitrary connection setup +* PostgreSQL array to Go slice mapping for integers, floats, and strings +* Hstore support +* JSON and JSONB support +* Maps inet and cidr PostgreSQL types to net.IPNet and net.IP +* Large object support +* Null mapping to Null* struct or pointer to pointer. +* Supports database/sql.Scanner and database/sql/driver.Valuer interfaces for custom types +* Logical replication connections, including receiving WAL and sending standby status updates + +## Performance + +Pgx performs roughly equivalent to [pq](http://godoc.org/github.com/lib/pq) and +[go-pg](https://github.com/go-pg/pg) for selecting a single column from a single +row, but it is substantially faster when selecting multiple entire rows (6893 +queries/sec for pgx vs. 3968 queries/sec for pq -- 73% faster). + +See this [gist](https://gist.github.com/jackc/d282f39e088b495fba3e) for the +underlying benchmark results or checkout +[go_db_bench](https://github.com/jackc/go_db_bench) to run tests for yourself. + +## database/sql + +Import the ```github.com/jackc/pgx/stdlib``` package to use pgx as a driver for +database/sql. It is possible to retrieve a pgx connection from database/sql on +demand. This allows using the database/sql interface in most places, but using +pgx directly when more performance or PostgreSQL specific features are needed. + +## Documentation + +pgx includes extensive documentation in the godoc format. It is viewable online at [godoc.org](https://godoc.org/github.com/jackc/pgx). + +## Testing + +pgx supports multiple connection and authentication types. Setting up a test +environment that can test all of them can be cumbersome. In particular, +Windows cannot test Unix domain socket connections. Because of this pgx will +skip tests for connection types that are not configured. + +### Normal Test Environment + +To setup the normal test environment, first install these dependencies: + + go get github.com/jackc/fake + go get github.com/shopspring/decimal + go get gopkg.in/inconshreveable/log15.v2 + +Then run the following SQL: + + create user pgx_md5 password 'secret'; + create user " tricky, ' } "" \ test user " password 'secret'; + create database pgx_test; + create user pgx_replication with replication password 'secret'; + +Connect to database pgx_test and run: + + create extension hstore; + +Next open conn_config_test.go.example and make a copy without the +.example. If your PostgreSQL server is accepting connections on 127.0.0.1, +then you are done. + +### Connection and Authentication Test Environment + +Complete the normal test environment setup and also do the following. 
+ +Run the following SQL: + + create user pgx_none; + create user pgx_pw password 'secret'; + +Add the following to your pg_hba.conf: + +If you are developing on Unix with domain socket connections: + + local pgx_test pgx_none trust + local pgx_test pgx_pw password + local pgx_test pgx_md5 md5 + +If you are developing on Windows with TCP connections: + + host pgx_test pgx_none 127.0.0.1/32 trust + host pgx_test pgx_pw 127.0.0.1/32 password + host pgx_test pgx_md5 127.0.0.1/32 md5 + +### Replication Test Environment + +Add a replication user: + + create user pgx_replication with replication password 'secret'; + +Add a replication line to your pg_hba.conf: + + host replication pgx_replication 127.0.0.1/32 md5 + +Change the following settings in your postgresql.conf: + + wal_level=logical + max_wal_senders=5 + max_replication_slots=5 + +## Version Policy + +pgx follows semantic versioning for the documented public API on stable releases. Branch `v2` is the latest stable release. `master` can contain new features or behavior that will change or be removed before being merged to the stable `v2` branch (in practice, this occurs very rarely). diff --git a/vendor/github.com/jackc/pgx/conn.go b/vendor/github.com/jackc/pgx/conn.go new file mode 100644 index 0000000..a2d60e7 --- /dev/null +++ b/vendor/github.com/jackc/pgx/conn.go @@ -0,0 +1,1322 @@ +package pgx + +import ( + "bufio" + "crypto/md5" + "crypto/tls" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "io" + "net" + "net/url" + "os" + "os/user" + "path/filepath" + "regexp" + "strconv" + "strings" + "time" +) + +// DialFunc is a function that can be used to connect to a PostgreSQL server +type DialFunc func(network, addr string) (net.Conn, error) + +// ConnConfig contains all the options used to establish a connection. +type ConnConfig struct { + Host string // host (e.g. localhost) or path to unix domain socket directory (e.g. /private/tmp) + Port uint16 // default: 5432 + Database string + User string // default: OS user name + Password string + TLSConfig *tls.Config // config for TLS connection -- nil disables TLS + UseFallbackTLS bool // Try FallbackTLSConfig if connecting with TLSConfig fails. Used for preferring TLS, but allowing unencrypted, or vice-versa + FallbackTLSConfig *tls.Config // config for fallback TLS connection (only used if UseFallBackTLS is true)-- nil disables TLS + Logger Logger + LogLevel int + Dial DialFunc + RuntimeParams map[string]string // Run-time parameters to set on connection as session default values (e.g. search_path or application_name) +} + +// Conn is a PostgreSQL connection handle. It is not safe for concurrent usage. +// Use ConnPool to manage access to multiple database connections from multiple +// goroutines. 
+type Conn struct { + conn net.Conn // the underlying TCP or unix domain socket connection + lastActivityTime time.Time // the last time the connection was used + reader *bufio.Reader // buffered reader to improve read performance + wbuf [1024]byte + writeBuf WriteBuf + Pid int32 // backend pid + SecretKey int32 // key to use to send a cancel query message to the server + RuntimeParams map[string]string // parameters that have been reported by the server + PgTypes map[Oid]PgType // oids to PgTypes + config ConnConfig // config used when establishing this connection + TxStatus byte + preparedStatements map[string]*PreparedStatement + channels map[string]struct{} + notifications []*Notification + alive bool + causeOfDeath error + logger Logger + logLevel int + mr msgReader + fp *fastpath + pgsqlAfInet *byte + pgsqlAfInet6 *byte + busy bool + poolResetCount int + preallocatedRows []Rows +} + +// PreparedStatement is a description of a prepared statement +type PreparedStatement struct { + Name string + SQL string + FieldDescriptions []FieldDescription + ParameterOids []Oid +} + +// PrepareExOptions is an option struct that can be passed to PrepareEx +type PrepareExOptions struct { + ParameterOids []Oid +} + +// Notification is a message received from the PostgreSQL LISTEN/NOTIFY system +type Notification struct { + Pid int32 // backend pid that sent the notification + Channel string // channel from which notification was received + Payload string +} + +// PgType is information about PostgreSQL type and how to encode and decode it +type PgType struct { + Name string // name of type e.g. int4, text, date + DefaultFormat int16 // default format (text or binary) this type will be requested in +} + +// CommandTag is the result of an Exec function +type CommandTag string + +// RowsAffected returns the number of rows affected. If the CommandTag was not +// for a row affecting command (such as "CREATE TABLE") then it returns 0 +func (ct CommandTag) RowsAffected() int64 { + s := string(ct) + index := strings.LastIndex(s, " ") + if index == -1 { + return 0 + } + n, _ := strconv.ParseInt(s[index+1:], 10, 64) + return n +} + +// Identifier a PostgreSQL identifier or name. Identifiers can be composed of +// multiple parts such as ["schema", "table"] or ["table", "column"]. +type Identifier []string + +// Sanitize returns a sanitized string safe for SQL interpolation. +func (ident Identifier) Sanitize() string { + parts := make([]string, len(ident)) + for i := range ident { + parts[i] = `"` + strings.Replace(ident[i], `"`, `""`, -1) + `"` + } + return strings.Join(parts, ".") +} + +// ErrNoRows occurs when rows are expected but none are returned. +var ErrNoRows = errors.New("no rows in result set") + +// ErrNotificationTimeout occurs when WaitForNotification times out. +var ErrNotificationTimeout = errors.New("notification timeout") + +// ErrDeadConn occurs on an attempt to use a dead connection +var ErrDeadConn = errors.New("conn is dead") + +// ErrTLSRefused occurs when the connection attempt requires TLS and the +// PostgreSQL server refuses to use TLS +var ErrTLSRefused = errors.New("server refused TLS connection") + +// ErrConnBusy occurs when the connection is busy (for example, in the middle of +// reading query results) and another action is attempts. +var ErrConnBusy = errors.New("conn is busy") + +// ErrInvalidLogLevel occurs on attempt to set an invalid log level. 
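+// It is returned by SetLogLevel, defined later in this file, e.g.
+// (a minimal sketch; 42 stands in for any out-of-range level):
+//
+//	if _, err := conn.SetLogLevel(42); err == pgx.ErrInvalidLogLevel {
+//		// fall back to a supported level
+//	}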
+var ErrInvalidLogLevel = errors.New("invalid log level") + +// ProtocolError occurs when unexpected data is received from PostgreSQL +type ProtocolError string + +func (e ProtocolError) Error() string { + return string(e) +} + +// Connect establishes a connection with a PostgreSQL server using config. +// config.Host must be specified. config.User will default to the OS user name. +// Other config fields are optional. +func Connect(config ConnConfig) (c *Conn, err error) { + return connect(config, nil, nil, nil) +} + +func connect(config ConnConfig, pgTypes map[Oid]PgType, pgsqlAfInet *byte, pgsqlAfInet6 *byte) (c *Conn, err error) { + c = new(Conn) + + c.config = config + + if pgTypes != nil { + c.PgTypes = make(map[Oid]PgType, len(pgTypes)) + for k, v := range pgTypes { + c.PgTypes[k] = v + } + } + + if pgsqlAfInet != nil { + c.pgsqlAfInet = new(byte) + *c.pgsqlAfInet = *pgsqlAfInet + } + if pgsqlAfInet6 != nil { + c.pgsqlAfInet6 = new(byte) + *c.pgsqlAfInet6 = *pgsqlAfInet6 + } + + if c.config.LogLevel != 0 { + c.logLevel = c.config.LogLevel + } else { + // Preserve pre-LogLevel behavior by defaulting to LogLevelDebug + c.logLevel = LogLevelDebug + } + c.logger = c.config.Logger + c.mr.log = c.log + c.mr.shouldLog = c.shouldLog + + if c.config.User == "" { + user, err := user.Current() + if err != nil { + return nil, err + } + c.config.User = user.Username + if c.shouldLog(LogLevelDebug) { + c.log(LogLevelDebug, "Using default connection config", "User", c.config.User) + } + } + + if c.config.Port == 0 { + c.config.Port = 5432 + if c.shouldLog(LogLevelDebug) { + c.log(LogLevelDebug, "Using default connection config", "Port", c.config.Port) + } + } + + network := "tcp" + address := fmt.Sprintf("%s:%d", c.config.Host, c.config.Port) + // See if host is a valid path, if yes connect with a socket + if _, err := os.Stat(c.config.Host); err == nil { + // For backward compatibility accept socket file paths -- but directories are now preferred + network = "unix" + address = c.config.Host + if !strings.Contains(address, "/.s.PGSQL.") { + address = filepath.Join(address, ".s.PGSQL.") + strconv.FormatInt(int64(c.config.Port), 10) + } + } + if c.config.Dial == nil { + c.config.Dial = (&net.Dialer{KeepAlive: 5 * time.Minute}).Dial + } + + if c.shouldLog(LogLevelInfo) { + c.log(LogLevelInfo, fmt.Sprintf("Dialing PostgreSQL server at %s address: %s", network, address)) + } + err = c.connect(config, network, address, config.TLSConfig) + if err != nil && config.UseFallbackTLS { + if c.shouldLog(LogLevelInfo) { + c.log(LogLevelInfo, fmt.Sprintf("Connect with TLSConfig failed, trying FallbackTLSConfig: %v", err)) + } + err = c.connect(config, network, address, config.FallbackTLSConfig) + } + + if err != nil { + if c.shouldLog(LogLevelError) { + c.log(LogLevelError, fmt.Sprintf("Connect failed: %v", err)) + } + return nil, err + } + + return c, nil +} + +func (c *Conn) connect(config ConnConfig, network, address string, tlsConfig *tls.Config) (err error) { + c.conn, err = c.config.Dial(network, address) + if err != nil { + return err + } + defer func() { + if c != nil && err != nil { + c.conn.Close() + c.alive = false + } + }() + + c.RuntimeParams = make(map[string]string) + c.preparedStatements = make(map[string]*PreparedStatement) + c.channels = make(map[string]struct{}) + c.alive = true + c.lastActivityTime = time.Now() + + if tlsConfig != nil { + if c.shouldLog(LogLevelDebug) { + c.log(LogLevelDebug, "Starting TLS handshake") + } + if err := c.startTLS(tlsConfig); err != nil { + return err + } + } + + 
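+	// At this point the socket (possibly TLS-wrapped) is ready. The rest of
+	// connect performs the PostgreSQL startup handshake: send the startup
+	// message, answer the server's authentication requests, then wait for
+	// ReadyForQuery before loading type information.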
c.reader = bufio.NewReader(c.conn) + c.mr.reader = c.reader + + msg := newStartupMessage() + + // Default to disabling TLS renegotiation. + // + // Go does not support (https://github.com/golang/go/issues/5742) + // PostgreSQL recommends disabling (http://www.postgresql.org/docs/9.4/static/runtime-config-connection.html#GUC-SSL-RENEGOTIATION-LIMIT) + if tlsConfig != nil { + msg.options["ssl_renegotiation_limit"] = "0" + } + + // Copy default run-time params + for k, v := range config.RuntimeParams { + msg.options[k] = v + } + + msg.options["user"] = c.config.User + if c.config.Database != "" { + msg.options["database"] = c.config.Database + } + + if err = c.txStartupMessage(msg); err != nil { + return err + } + + for { + var t byte + var r *msgReader + t, r, err = c.rxMsg() + if err != nil { + return err + } + + switch t { + case backendKeyData: + c.rxBackendKeyData(r) + case authenticationX: + if err = c.rxAuthenticationX(r); err != nil { + return err + } + case readyForQuery: + c.rxReadyForQuery(r) + if c.shouldLog(LogLevelInfo) { + c.log(LogLevelInfo, "Connection established") + } + + // Replication connections can't execute the queries to + // populate the c.PgTypes and c.pgsqlAfInet + if _, ok := msg.options["replication"]; ok { + return nil + } + + if c.PgTypes == nil { + err = c.loadPgTypes() + if err != nil { + return err + } + } + + if c.pgsqlAfInet == nil || c.pgsqlAfInet6 == nil { + err = c.loadInetConstants() + if err != nil { + return err + } + } + + return nil + default: + if err = c.processContextFreeMsg(t, r); err != nil { + return err + } + } + } +} + +func (c *Conn) loadPgTypes() error { + rows, err := c.Query(`select t.oid, t.typname +from pg_type t +left join pg_type base_type on t.typelem=base_type.oid +where ( + t.typtype='b' + and (base_type.oid is null or base_type.typtype='b') + ) + or t.typname in('record');`) + if err != nil { + return err + } + + c.PgTypes = make(map[Oid]PgType, 128) + + for rows.Next() { + var oid Oid + var t PgType + + rows.Scan(&oid, &t.Name) + + // The zero value is text format so we ignore any types without a default type format + t.DefaultFormat, _ = DefaultTypeFormats[t.Name] + + c.PgTypes[oid] = t + } + + return rows.Err() +} + +// Family is needed for binary encoding of inet/cidr. The constant is based on +// the server's definition of AF_INET. In theory, this could differ between +// platforms, so request an IPv4 and an IPv6 inet and get the family from that. +func (c *Conn) loadInetConstants() error { + var ipv4, ipv6 []byte + + err := c.QueryRow("select '127.0.0.1'::inet, '1::'::inet").Scan(&ipv4, &ipv6) + if err != nil { + return err + } + + c.pgsqlAfInet = &ipv4[0] + c.pgsqlAfInet6 = &ipv6[0] + + return nil +} + +// Close closes a connection. It is safe to call Close on a already closed +// connection. +func (c *Conn) Close() (err error) { + if !c.IsAlive() { + return nil + } + + wbuf := newWriteBuf(c, 'X') + wbuf.closeMsg() + + _, err = c.conn.Write(wbuf.buf) + + c.die(errors.New("Closed")) + if c.shouldLog(LogLevelInfo) { + c.log(LogLevelInfo, "Closed connection") + } + return err +} + +// ParseURI parses a database URI into ConnConfig +// +// Query parameters not used by the connection process are parsed into ConnConfig.RuntimeParams. 
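+//
+// e.g. (a minimal sketch; the URI values are hypothetical):
+//
+//	config, err := pgx.ParseURI("postgres://pgx_md5:secret@127.0.0.1:5432/pgx_test?application_name=myapp")
+//	// config.User == "pgx_md5", config.Database == "pgx_test",
+//	// config.RuntimeParams["application_name"] == "myapp"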
+func ParseURI(uri string) (ConnConfig, error) { + var cp ConnConfig + + url, err := url.Parse(uri) + if err != nil { + return cp, err + } + + if url.User != nil { + cp.User = url.User.Username() + cp.Password, _ = url.User.Password() + } + + parts := strings.SplitN(url.Host, ":", 2) + cp.Host = parts[0] + if len(parts) == 2 { + p, err := strconv.ParseUint(parts[1], 10, 16) + if err != nil { + return cp, err + } + cp.Port = uint16(p) + } + cp.Database = strings.TrimLeft(url.Path, "/") + + err = configSSL(url.Query().Get("sslmode"), &cp) + if err != nil { + return cp, err + } + + ignoreKeys := map[string]struct{}{ + "sslmode": {}, + } + + cp.RuntimeParams = make(map[string]string) + + for k, v := range url.Query() { + if _, ok := ignoreKeys[k]; ok { + continue + } + + cp.RuntimeParams[k] = v[0] + } + if cp.Password == "" { + pgpass(&cp) + } + return cp, nil +} + +var dsnRegexp = regexp.MustCompile(`([a-zA-Z_]+)=((?:"[^"]+")|(?:[^ ]+))`) + +// ParseDSN parses a database DSN (data source name) into a ConnConfig +// +// e.g. ParseDSN("user=username password=password host=1.2.3.4 port=5432 dbname=mydb sslmode=disable") +// +// Any options not used by the connection process are parsed into ConnConfig.RuntimeParams. +// +// e.g. ParseDSN("application_name=pgxtest search_path=admin user=username password=password host=1.2.3.4 dbname=mydb") +// +// ParseDSN tries to match libpq behavior with regard to sslmode. See comments +// for ParseEnvLibpq for more information on the security implications of +// sslmode options. +func ParseDSN(s string) (ConnConfig, error) { + var cp ConnConfig + + m := dsnRegexp.FindAllStringSubmatch(s, -1) + + var sslmode string + + cp.RuntimeParams = make(map[string]string) + + for _, b := range m { + switch b[1] { + case "user": + cp.User = b[2] + case "password": + cp.Password = b[2] + case "host": + cp.Host = b[2] + case "port": + p, err := strconv.ParseUint(b[2], 10, 16) + if err != nil { + return cp, err + } + cp.Port = uint16(p) + case "dbname": + cp.Database = b[2] + case "sslmode": + sslmode = b[2] + default: + cp.RuntimeParams[b[1]] = b[2] + } + } + + err := configSSL(sslmode, &cp) + if err != nil { + return cp, err + } + if cp.Password == "" { + pgpass(&cp) + } + return cp, nil +} + +// ParseConnectionString parses either a URI or a DSN connection string. +// see ParseURI and ParseDSN for details. +func ParseConnectionString(s string) (ConnConfig, error) { + if strings.HasPrefix(s, "postgres://") || strings.HasPrefix(s, "postgresql://") { + return ParseURI(s) + } + return ParseDSN(s) +} + +// ParseEnvLibpq parses the environment like libpq does into a ConnConfig +// +// See http://www.postgresql.org/docs/9.4/static/libpq-envars.html for details +// on the meaning of environment variables. +// +// ParseEnvLibpq currently recognizes the following environment variables: +// PGHOST +// PGPORT +// PGDATABASE +// PGUSER +// PGPASSWORD +// PGSSLMODE +// PGAPPNAME +// +// Important TLS Security Notes: +// ParseEnvLibpq tries to match libpq behavior with regard to PGSSLMODE. This +// includes defaulting to "prefer" behavior if no environment variable is set. +// +// See http://www.postgresql.org/docs/9.4/static/libpq-ssl.html#LIBPQ-SSL-PROTECTION +// for details on what level of security each sslmode provides. +// +// "require" and "verify-ca" modes currently are treated as "verify-full". e.g. +// They have stronger security guarantees than they would with libpq. Do not +// rely on this behavior as it may be possible to match libpq in the future. 
If +// you need full security use "verify-full". +// +// Several of the PGSSLMODE options (including the default behavior of "prefer") +// will set UseFallbackTLS to true and FallbackTLSConfig to a disabled or +// weakened TLS mode. This means that if ParseEnvLibpq is used, but TLSConfig is +// later set from a different source that UseFallbackTLS MUST be set false to +// avoid the possibility of falling back to weaker or disabled security. +func ParseEnvLibpq() (ConnConfig, error) { + var cc ConnConfig + + cc.Host = os.Getenv("PGHOST") + + if pgport := os.Getenv("PGPORT"); pgport != "" { + if port, err := strconv.ParseUint(pgport, 10, 16); err == nil { + cc.Port = uint16(port) + } else { + return cc, err + } + } + + cc.Database = os.Getenv("PGDATABASE") + cc.User = os.Getenv("PGUSER") + cc.Password = os.Getenv("PGPASSWORD") + + sslmode := os.Getenv("PGSSLMODE") + + err := configSSL(sslmode, &cc) + if err != nil { + return cc, err + } + + cc.RuntimeParams = make(map[string]string) + if appname := os.Getenv("PGAPPNAME"); appname != "" { + cc.RuntimeParams["application_name"] = appname + } + if cc.Password == "" { + pgpass(&cc) + } + return cc, nil +} + +func configSSL(sslmode string, cc *ConnConfig) error { + // Match libpq default behavior + if sslmode == "" { + sslmode = "prefer" + } + + switch sslmode { + case "disable": + case "allow": + cc.UseFallbackTLS = true + cc.FallbackTLSConfig = &tls.Config{InsecureSkipVerify: true} + case "prefer": + cc.TLSConfig = &tls.Config{InsecureSkipVerify: true} + cc.UseFallbackTLS = true + cc.FallbackTLSConfig = nil + case "require", "verify-ca", "verify-full": + cc.TLSConfig = &tls.Config{ + ServerName: cc.Host, + } + default: + return errors.New("sslmode is invalid") + } + + return nil +} + +// Prepare creates a prepared statement with name and sql. sql can contain placeholders +// for bound parameters. These placeholders are referenced positional as $1, $2, etc. +// +// Prepare is idempotent; i.e. it is safe to call Prepare multiple times with the same +// name and sql arguments. This allows a code path to Prepare and Query/Exec without +// concern for if the statement has already been prepared. +func (c *Conn) Prepare(name, sql string) (ps *PreparedStatement, err error) { + return c.PrepareEx(name, sql, nil) +} + +// PrepareEx creates a prepared statement with name and sql. sql can contain placeholders +// for bound parameters. These placeholders are referenced positional as $1, $2, etc. +// It defers from Prepare as it allows additional options (such as parameter OIDs) to be passed via struct +// +// PrepareEx is idempotent; i.e. it is safe to call PrepareEx multiple times with the same +// name and sql arguments. This allows a code path to PrepareEx and Query/Exec without +// concern for if the statement has already been prepared. 
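+//
+// e.g. (a minimal sketch; the statement name and parameter OID are
+// illustrative):
+//
+//	ps, err := conn.PrepareEx("getWidget", "select name from widgets where id=$1",
+//		&pgx.PrepareExOptions{ParameterOids: []pgx.Oid{pgx.Int4Oid}})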
+func (c *Conn) PrepareEx(name, sql string, opts *PrepareExOptions) (ps *PreparedStatement, err error) { + if name != "" { + if ps, ok := c.preparedStatements[name]; ok && ps.SQL == sql { + return ps, nil + } + } + + if c.shouldLog(LogLevelError) { + defer func() { + if err != nil { + c.log(LogLevelError, fmt.Sprintf("Prepare `%s` as `%s` failed: %v", name, sql, err)) + } + }() + } + + // parse + wbuf := newWriteBuf(c, 'P') + wbuf.WriteCString(name) + wbuf.WriteCString(sql) + + if opts != nil { + if len(opts.ParameterOids) > 65535 { + return nil, fmt.Errorf("Number of PrepareExOptions ParameterOids must be between 0 and 65535, received %d", len(opts.ParameterOids)) + } + wbuf.WriteInt16(int16(len(opts.ParameterOids))) + for _, oid := range opts.ParameterOids { + wbuf.WriteInt32(int32(oid)) + } + } else { + wbuf.WriteInt16(0) + } + + // describe + wbuf.startMsg('D') + wbuf.WriteByte('S') + wbuf.WriteCString(name) + + // sync + wbuf.startMsg('S') + wbuf.closeMsg() + + _, err = c.conn.Write(wbuf.buf) + if err != nil { + c.die(err) + return nil, err + } + + ps = &PreparedStatement{Name: name, SQL: sql} + + var softErr error + + for { + var t byte + var r *msgReader + t, r, err := c.rxMsg() + if err != nil { + return nil, err + } + + switch t { + case parseComplete: + case parameterDescription: + ps.ParameterOids = c.rxParameterDescription(r) + + if len(ps.ParameterOids) > 65535 && softErr == nil { + softErr = fmt.Errorf("PostgreSQL supports maximum of 65535 parameters, received %d", len(ps.ParameterOids)) + } + case rowDescription: + ps.FieldDescriptions = c.rxRowDescription(r) + for i := range ps.FieldDescriptions { + t, _ := c.PgTypes[ps.FieldDescriptions[i].DataType] + ps.FieldDescriptions[i].DataTypeName = t.Name + ps.FieldDescriptions[i].FormatCode = t.DefaultFormat + } + case noData: + case readyForQuery: + c.rxReadyForQuery(r) + + if softErr == nil { + c.preparedStatements[name] = ps + } + + return ps, softErr + default: + if e := c.processContextFreeMsg(t, r); e != nil && softErr == nil { + softErr = e + } + } + } +} + +// Deallocate released a prepared statement +func (c *Conn) Deallocate(name string) (err error) { + delete(c.preparedStatements, name) + + // close + wbuf := newWriteBuf(c, 'C') + wbuf.WriteByte('S') + wbuf.WriteCString(name) + + // flush + wbuf.startMsg('H') + wbuf.closeMsg() + + _, err = c.conn.Write(wbuf.buf) + if err != nil { + c.die(err) + return err + } + + for { + var t byte + var r *msgReader + t, r, err := c.rxMsg() + if err != nil { + return err + } + + switch t { + case closeComplete: + return nil + default: + err = c.processContextFreeMsg(t, r) + if err != nil { + return err + } + } + } +} + +// Listen establishes a PostgreSQL listen/notify to channel +func (c *Conn) Listen(channel string) error { + _, err := c.Exec("listen " + quoteIdentifier(channel)) + if err != nil { + return err + } + + c.channels[channel] = struct{}{} + + return nil +} + +// Unlisten unsubscribes from a listen channel +func (c *Conn) Unlisten(channel string) error { + _, err := c.Exec("unlisten " + quoteIdentifier(channel)) + if err != nil { + return err + } + + delete(c.channels, channel) + return nil +} + +// WaitForNotification waits for a PostgreSQL notification for up to timeout. 
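+// The connection must already be subscribed with Listen. A typical receive
+// loop is sketched below (a minimal sketch; "mychannel" is a hypothetical
+// channel name):
+//
+//	conn.Listen("mychannel")
+//	for {
+//		notification, err := conn.WaitForNotification(30 * time.Second)
+//		if err == pgx.ErrNotificationTimeout {
+//			continue
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		fmt.Println(notification.Channel, notification.Payload)
+//	}
+//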
+// If the timeout occurs it returns pgx.ErrNotificationTimeout +func (c *Conn) WaitForNotification(timeout time.Duration) (*Notification, error) { + // Return already received notification immediately + if len(c.notifications) > 0 { + notification := c.notifications[0] + c.notifications = c.notifications[1:] + return notification, nil + } + + stopTime := time.Now().Add(timeout) + + for { + now := time.Now() + + if now.After(stopTime) { + return nil, ErrNotificationTimeout + } + + // If there has been no activity on this connection for a while send a nop message just to ensure + // the connection is alive + nextEnsureAliveTime := c.lastActivityTime.Add(15 * time.Second) + if nextEnsureAliveTime.Before(now) { + // If the server can't respond to a nop in 15 seconds, assume it's dead + err := c.conn.SetReadDeadline(now.Add(15 * time.Second)) + if err != nil { + return nil, err + } + + _, err = c.Exec("--;") + if err != nil { + return nil, err + } + + c.lastActivityTime = now + } + + var deadline time.Time + if stopTime.Before(nextEnsureAliveTime) { + deadline = stopTime + } else { + deadline = nextEnsureAliveTime + } + + notification, err := c.waitForNotification(deadline) + if err != ErrNotificationTimeout { + return notification, err + } + } +} + +func (c *Conn) waitForNotification(deadline time.Time) (*Notification, error) { + var zeroTime time.Time + + for { + // Use SetReadDeadline to implement the timeout. SetReadDeadline will + // cause operations to fail with a *net.OpError that has a Timeout() + // of true. Because the normal pgx rxMsg path considers any error to + // have potentially corrupted the state of the connection, it dies + // on any errors. So to avoid timeout errors in rxMsg we set the + // deadline and peek into the reader. If a timeout error occurs there + // we don't break the pgx connection. If the Peek returns that data + // is available then we turn off the read deadline before the rxMsg. + err := c.conn.SetReadDeadline(deadline) + if err != nil { + return nil, err + } + + // Wait until there is a byte available before continuing onto the normal msg reading path + _, err = c.reader.Peek(1) + if err != nil { + c.conn.SetReadDeadline(zeroTime) // we can only return one error and we already have one -- so ignore possiple error from SetReadDeadline + if err, ok := err.(*net.OpError); ok && err.Timeout() { + return nil, ErrNotificationTimeout + } + return nil, err + } + + err = c.conn.SetReadDeadline(zeroTime) + if err != nil { + return nil, err + } + + var t byte + var r *msgReader + if t, r, err = c.rxMsg(); err == nil { + if err = c.processContextFreeMsg(t, r); err != nil { + return nil, err + } + } else { + return nil, err + } + + if len(c.notifications) > 0 { + notification := c.notifications[0] + c.notifications = c.notifications[1:] + return notification, nil + } + } +} + +func (c *Conn) IsAlive() bool { + return c.alive +} + +func (c *Conn) CauseOfDeath() error { + return c.causeOfDeath +} + +func (c *Conn) sendQuery(sql string, arguments ...interface{}) (err error) { + if ps, present := c.preparedStatements[sql]; present { + return c.sendPreparedQuery(ps, arguments...) + } + return c.sendSimpleQuery(sql, arguments...) 
+} + +func (c *Conn) sendSimpleQuery(sql string, args ...interface{}) error { + + if len(args) == 0 { + wbuf := newWriteBuf(c, 'Q') + wbuf.WriteCString(sql) + wbuf.closeMsg() + + _, err := c.conn.Write(wbuf.buf) + if err != nil { + c.die(err) + return err + } + + return nil + } + + ps, err := c.Prepare("", sql) + if err != nil { + return err + } + + return c.sendPreparedQuery(ps, args...) +} + +func (c *Conn) sendPreparedQuery(ps *PreparedStatement, arguments ...interface{}) (err error) { + if len(ps.ParameterOids) != len(arguments) { + return fmt.Errorf("Prepared statement \"%v\" requires %d parameters, but %d were provided", ps.Name, len(ps.ParameterOids), len(arguments)) + } + + // bind + wbuf := newWriteBuf(c, 'B') + wbuf.WriteByte(0) + wbuf.WriteCString(ps.Name) + + wbuf.WriteInt16(int16(len(ps.ParameterOids))) + for i, oid := range ps.ParameterOids { + switch arg := arguments[i].(type) { + case Encoder: + wbuf.WriteInt16(arg.FormatCode()) + case string, *string: + wbuf.WriteInt16(TextFormatCode) + default: + switch oid { + case BoolOid, ByteaOid, Int2Oid, Int4Oid, Int8Oid, Float4Oid, Float8Oid, TimestampTzOid, TimestampTzArrayOid, TimestampOid, TimestampArrayOid, DateOid, BoolArrayOid, ByteaArrayOid, Int2ArrayOid, Int4ArrayOid, Int8ArrayOid, Float4ArrayOid, Float8ArrayOid, TextArrayOid, VarcharArrayOid, OidOid, InetOid, CidrOid, InetArrayOid, CidrArrayOid, RecordOid, JsonOid, JsonbOid: + wbuf.WriteInt16(BinaryFormatCode) + default: + wbuf.WriteInt16(TextFormatCode) + } + } + } + + wbuf.WriteInt16(int16(len(arguments))) + for i, oid := range ps.ParameterOids { + if err := Encode(wbuf, oid, arguments[i]); err != nil { + return err + } + } + + wbuf.WriteInt16(int16(len(ps.FieldDescriptions))) + for _, fd := range ps.FieldDescriptions { + wbuf.WriteInt16(fd.FormatCode) + } + + // execute + wbuf.startMsg('E') + wbuf.WriteByte(0) + wbuf.WriteInt32(0) + + // sync + wbuf.startMsg('S') + wbuf.closeMsg() + + _, err = c.conn.Write(wbuf.buf) + if err != nil { + c.die(err) + } + + return err +} + +// Exec executes sql. sql can be either a prepared statement name or an SQL string. +// arguments should be referenced positionally from the sql string as $1, $2, etc. +func (c *Conn) Exec(sql string, arguments ...interface{}) (commandTag CommandTag, err error) { + if err = c.lock(); err != nil { + return commandTag, err + } + + startTime := time.Now() + c.lastActivityTime = startTime + + defer func() { + if err == nil { + if c.shouldLog(LogLevelInfo) { + endTime := time.Now() + c.log(LogLevelInfo, "Exec", "sql", sql, "args", logQueryArgs(arguments), "time", endTime.Sub(startTime), "commandTag", commandTag) + } + } else { + if c.shouldLog(LogLevelError) { + c.log(LogLevelError, "Exec", "sql", sql, "args", logQueryArgs(arguments), "error", err) + } + } + + if unlockErr := c.unlock(); unlockErr != nil && err == nil { + err = unlockErr + } + }() + + if err = c.sendQuery(sql, arguments...); err != nil { + return + } + + var softErr error + + for { + var t byte + var r *msgReader + t, r, err = c.rxMsg() + if err != nil { + return commandTag, err + } + + switch t { + case readyForQuery: + c.rxReadyForQuery(r) + return commandTag, softErr + case rowDescription: + case dataRow: + case bindComplete: + case commandComplete: + commandTag = CommandTag(r.readCString()) + default: + if e := c.processContextFreeMsg(t, r); e != nil && softErr == nil { + softErr = e + } + } + } +} + +// Processes messages that are not exclusive to one context such as +// authentication or query response. 
The response to these messages +// is the same regardless of when they occur. +func (c *Conn) processContextFreeMsg(t byte, r *msgReader) (err error) { + switch t { + case 'S': + c.rxParameterStatus(r) + return nil + case errorResponse: + return c.rxErrorResponse(r) + case noticeResponse: + return nil + case emptyQueryResponse: + return nil + case notificationResponse: + c.rxNotificationResponse(r) + return nil + default: + return fmt.Errorf("Received unknown message type: %c", t) + } +} + +func (c *Conn) rxMsg() (t byte, r *msgReader, err error) { + if !c.alive { + return 0, nil, ErrDeadConn + } + + t, err = c.mr.rxMsg() + if err != nil { + c.die(err) + } + + c.lastActivityTime = time.Now() + + if c.shouldLog(LogLevelTrace) { + c.log(LogLevelTrace, "rxMsg", "type", string(t), "msgBytesRemaining", c.mr.msgBytesRemaining) + } + + return t, &c.mr, err +} + +func (c *Conn) rxAuthenticationX(r *msgReader) (err error) { + switch r.readInt32() { + case 0: // AuthenticationOk + case 3: // AuthenticationCleartextPassword + err = c.txPasswordMessage(c.config.Password) + case 5: // AuthenticationMD5Password + salt := r.readString(4) + digestedPassword := "md5" + hexMD5(hexMD5(c.config.Password+c.config.User)+salt) + err = c.txPasswordMessage(digestedPassword) + default: + err = errors.New("Received unknown authentication message") + } + + return +} + +func hexMD5(s string) string { + hash := md5.New() + io.WriteString(hash, s) + return hex.EncodeToString(hash.Sum(nil)) +} + +func (c *Conn) rxParameterStatus(r *msgReader) { + key := r.readCString() + value := r.readCString() + c.RuntimeParams[key] = value +} + +func (c *Conn) rxErrorResponse(r *msgReader) (err PgError) { + for { + switch r.readByte() { + case 'S': + err.Severity = r.readCString() + case 'C': + err.Code = r.readCString() + case 'M': + err.Message = r.readCString() + case 'D': + err.Detail = r.readCString() + case 'H': + err.Hint = r.readCString() + case 'P': + s := r.readCString() + n, _ := strconv.ParseInt(s, 10, 32) + err.Position = int32(n) + case 'p': + s := r.readCString() + n, _ := strconv.ParseInt(s, 10, 32) + err.InternalPosition = int32(n) + case 'q': + err.InternalQuery = r.readCString() + case 'W': + err.Where = r.readCString() + case 's': + err.SchemaName = r.readCString() + case 't': + err.TableName = r.readCString() + case 'c': + err.ColumnName = r.readCString() + case 'd': + err.DataTypeName = r.readCString() + case 'n': + err.ConstraintName = r.readCString() + case 'F': + err.File = r.readCString() + case 'L': + s := r.readCString() + n, _ := strconv.ParseInt(s, 10, 32) + err.Line = int32(n) + case 'R': + err.Routine = r.readCString() + + case 0: // End of error message + if err.Severity == "FATAL" { + c.die(err) + } + return + default: // Ignore other error fields + r.readCString() + } + } +} + +func (c *Conn) rxBackendKeyData(r *msgReader) { + c.Pid = r.readInt32() + c.SecretKey = r.readInt32() +} + +func (c *Conn) rxReadyForQuery(r *msgReader) { + c.TxStatus = r.readByte() +} + +func (c *Conn) rxRowDescription(r *msgReader) (fields []FieldDescription) { + fieldCount := r.readInt16() + fields = make([]FieldDescription, fieldCount) + for i := int16(0); i < fieldCount; i++ { + f := &fields[i] + f.Name = r.readCString() + f.Table = r.readOid() + f.AttributeNumber = r.readInt16() + f.DataType = r.readOid() + f.DataTypeSize = r.readInt16() + f.Modifier = r.readInt32() + f.FormatCode = r.readInt16() + } + return +} + +func (c *Conn) rxParameterDescription(r *msgReader) (parameters []Oid) { + // Internally, PostgreSQL 
supports greater than 64k parameters to a prepared + // statement. But the parameter description uses a 16-bit integer for the + // count of parameters. If there are more than 64K parameters, this count is + // wrong. So read the count, ignore it, and compute the proper value from + // the size of the message. + r.readInt16() + parameterCount := r.msgBytesRemaining / 4 + + parameters = make([]Oid, 0, parameterCount) + + for i := int32(0); i < parameterCount; i++ { + parameters = append(parameters, r.readOid()) + } + return +} + +func (c *Conn) rxNotificationResponse(r *msgReader) { + n := new(Notification) + n.Pid = r.readInt32() + n.Channel = r.readCString() + n.Payload = r.readCString() + c.notifications = append(c.notifications, n) +} + +func (c *Conn) startTLS(tlsConfig *tls.Config) (err error) { + err = binary.Write(c.conn, binary.BigEndian, []int32{8, 80877103}) + if err != nil { + return + } + + response := make([]byte, 1) + if _, err = io.ReadFull(c.conn, response); err != nil { + return + } + + if response[0] != 'S' { + return ErrTLSRefused + } + + c.conn = tls.Client(c.conn, tlsConfig) + + return nil +} + +func (c *Conn) txStartupMessage(msg *startupMessage) error { + _, err := c.conn.Write(msg.Bytes()) + return err +} + +func (c *Conn) txPasswordMessage(password string) (err error) { + wbuf := newWriteBuf(c, 'p') + wbuf.WriteCString(password) + wbuf.closeMsg() + + _, err = c.conn.Write(wbuf.buf) + + return err +} + +func (c *Conn) die(err error) { + c.alive = false + c.causeOfDeath = err + c.conn.Close() +} + +func (c *Conn) lock() error { + if c.busy { + return ErrConnBusy + } + c.busy = true + return nil +} + +func (c *Conn) unlock() error { + if !c.busy { + return errors.New("unlock conn that is not busy") + } + c.busy = false + return nil +} + +func (c *Conn) shouldLog(lvl int) bool { + return c.logger != nil && c.logLevel >= lvl +} + +func (c *Conn) log(lvl int, msg string, ctx ...interface{}) { + if c.Pid != 0 { + ctx = append(ctx, "pid", c.Pid) + } + + switch lvl { + case LogLevelTrace: + c.logger.Debug(msg, ctx...) + case LogLevelDebug: + c.logger.Debug(msg, ctx...) + case LogLevelInfo: + c.logger.Info(msg, ctx...) + case LogLevelWarn: + c.logger.Warn(msg, ctx...) + case LogLevelError: + c.logger.Error(msg, ctx...) + } +} + +// SetLogger replaces the current logger and returns the previous logger. +func (c *Conn) SetLogger(logger Logger) Logger { + oldLogger := c.logger + c.logger = logger + return oldLogger +} + +// SetLogLevel replaces the current log level and returns the previous log +// level. 
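+//
+// e.g. (a minimal sketch):
+//
+//	oldLvl, _ := conn.SetLogLevel(pgx.LogLevelInfo)
+//	defer conn.SetLogLevel(oldLvl)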
+func (c *Conn) SetLogLevel(lvl int) (int, error) { + oldLvl := c.logLevel + + if lvl < LogLevelNone || lvl > LogLevelTrace { + return oldLvl, ErrInvalidLogLevel + } + + c.logLevel = lvl + return lvl, nil +} + +func quoteIdentifier(s string) string { + return `"` + strings.Replace(s, `"`, `""`, -1) + `"` +} diff --git a/vendor/github.com/jackc/pgx/conn_config_test.go.example b/vendor/github.com/jackc/pgx/conn_config_test.go.example new file mode 100644 index 0000000..cac798b --- /dev/null +++ b/vendor/github.com/jackc/pgx/conn_config_test.go.example @@ -0,0 +1,25 @@ +package pgx_test + +import ( + "github.com/jackc/pgx" +) + +var defaultConnConfig *pgx.ConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "pgx_md5", Password: "secret", Database: "pgx_test"} + +// To skip tests for specific connection / authentication types set that connection param to nil +var tcpConnConfig *pgx.ConnConfig = nil +var unixSocketConnConfig *pgx.ConnConfig = nil +var md5ConnConfig *pgx.ConnConfig = nil +var plainPasswordConnConfig *pgx.ConnConfig = nil +var invalidUserConnConfig *pgx.ConnConfig = nil +var tlsConnConfig *pgx.ConnConfig = nil +var customDialerConnConfig *pgx.ConnConfig = nil +var replicationConnConfig *pgx.ConnConfig = nil + +// var tcpConnConfig *pgx.ConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "pgx_md5", Password: "secret", Database: "pgx_test"} +// var unixSocketConnConfig *pgx.ConnConfig = &pgx.ConnConfig{Host: "/private/tmp", User: "pgx_none", Database: "pgx_test"} +// var md5ConnConfig *pgx.ConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "pgx_md5", Password: "secret", Database: "pgx_test"} +// var plainPasswordConnConfig *pgx.ConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "pgx_pw", Password: "secret", Database: "pgx_test"} +// var invalidUserConnConfig *pgx.ConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "invalid", Database: "pgx_test"} +// var tlsConnConfig *pgx.ConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "pgx_md5", Password: "secret", Database: "pgx_test", TLSConfig: &tls.Config{InsecureSkipVerify: true}} +// var customDialerConnConfig *pgx.ConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "pgx_md5", Password: "secret", Database: "pgx_test"} diff --git a/vendor/github.com/jackc/pgx/conn_config_test.go.travis b/vendor/github.com/jackc/pgx/conn_config_test.go.travis new file mode 100644 index 0000000..75714bf --- /dev/null +++ b/vendor/github.com/jackc/pgx/conn_config_test.go.travis @@ -0,0 +1,30 @@ +package pgx_test + +import ( + "crypto/tls" + "github.com/jackc/pgx" + "os" + "strconv" +) + +var defaultConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "pgx_md5", Password: "secret", Database: "pgx_test"} +var tcpConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "pgx_md5", Password: "secret", Database: "pgx_test"} +var unixSocketConnConfig = &pgx.ConnConfig{Host: "/var/run/postgresql", User: "postgres", Database: "pgx_test"} +var md5ConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "pgx_md5", Password: "secret", Database: "pgx_test"} +var plainPasswordConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "pgx_pw", Password: "secret", Database: "pgx_test"} +var invalidUserConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "invalid", Database: "pgx_test"} +var tlsConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "pgx_ssl", Password: "secret", Database: "pgx_test", TLSConfig: &tls.Config{InsecureSkipVerify: true}} +var customDialerConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "pgx_md5", Password: "secret", Database: 
"pgx_test"} +var replicationConnConfig *pgx.ConnConfig = nil + +func init() { + version := os.Getenv("PGVERSION") + + if len(version) > 0 { + v, err := strconv.ParseFloat(version,64) + if err == nil && v >= 9.6 { + replicationConnConfig = &pgx.ConnConfig{Host: "127.0.0.1", User: "pgx_replication", Password: "secret", Database: "pgx_test"} + } + } +} + diff --git a/vendor/github.com/jackc/pgx/conn_pool.go b/vendor/github.com/jackc/pgx/conn_pool.go new file mode 100644 index 0000000..1913699 --- /dev/null +++ b/vendor/github.com/jackc/pgx/conn_pool.go @@ -0,0 +1,529 @@ +package pgx + +import ( + "errors" + "sync" + "time" +) + +type ConnPoolConfig struct { + ConnConfig + MaxConnections int // max simultaneous connections to use, default 5, must be at least 2 + AfterConnect func(*Conn) error // function to call on every new connection + AcquireTimeout time.Duration // max wait time when all connections are busy (0 means no timeout) +} + +type ConnPool struct { + allConnections []*Conn + availableConnections []*Conn + cond *sync.Cond + config ConnConfig // config used when establishing connection + inProgressConnects int + maxConnections int + resetCount int + afterConnect func(*Conn) error + logger Logger + logLevel int + closed bool + preparedStatements map[string]*PreparedStatement + acquireTimeout time.Duration + pgTypes map[Oid]PgType + pgsqlAfInet *byte + pgsqlAfInet6 *byte + txAfterClose func(tx *Tx) + rowsAfterClose func(rows *Rows) +} + +type ConnPoolStat struct { + MaxConnections int // max simultaneous connections to use + CurrentConnections int // current live connections + AvailableConnections int // unused live connections +} + +// ErrAcquireTimeout occurs when an attempt to acquire a connection times out. +var ErrAcquireTimeout = errors.New("timeout acquiring connection from pool") + +// NewConnPool creates a new ConnPool. config.ConnConfig is passed through to +// Connect directly. +func NewConnPool(config ConnPoolConfig) (p *ConnPool, err error) { + p = new(ConnPool) + p.config = config.ConnConfig + p.maxConnections = config.MaxConnections + if p.maxConnections == 0 { + p.maxConnections = 5 + } + if p.maxConnections < 1 { + return nil, errors.New("MaxConnections must be at least 1") + } + p.acquireTimeout = config.AcquireTimeout + if p.acquireTimeout < 0 { + return nil, errors.New("AcquireTimeout must be equal to or greater than 0") + } + + p.afterConnect = config.AfterConnect + + if config.LogLevel != 0 { + p.logLevel = config.LogLevel + } else { + // Preserve pre-LogLevel behavior by defaulting to LogLevelDebug + p.logLevel = LogLevelDebug + } + p.logger = config.Logger + if p.logger == nil { + p.logLevel = LogLevelNone + } + + p.txAfterClose = func(tx *Tx) { + p.Release(tx.Conn()) + } + + p.rowsAfterClose = func(rows *Rows) { + p.Release(rows.Conn()) + } + + p.allConnections = make([]*Conn, 0, p.maxConnections) + p.availableConnections = make([]*Conn, 0, p.maxConnections) + p.preparedStatements = make(map[string]*PreparedStatement) + p.cond = sync.NewCond(new(sync.Mutex)) + + // Initially establish one connection + var c *Conn + c, err = p.createConnection() + if err != nil { + return + } + p.allConnections = append(p.allConnections, c) + p.availableConnections = append(p.availableConnections, c) + + return +} + +// Acquire takes exclusive use of a connection until it is released. 
+func (p *ConnPool) Acquire() (*Conn, error) { + p.cond.L.Lock() + c, err := p.acquire(nil) + p.cond.L.Unlock() + return c, err +} + +// deadlinePassed returns true if the given deadline has passed. +func (p *ConnPool) deadlinePassed(deadline *time.Time) bool { + return deadline != nil && time.Now().After(*deadline) +} + +// acquire performs acquision assuming pool is already locked +func (p *ConnPool) acquire(deadline *time.Time) (*Conn, error) { + if p.closed { + return nil, errors.New("cannot acquire from closed pool") + } + + // A connection is available + if len(p.availableConnections) > 0 { + c := p.availableConnections[len(p.availableConnections)-1] + c.poolResetCount = p.resetCount + p.availableConnections = p.availableConnections[:len(p.availableConnections)-1] + return c, nil + } + + // Set initial timeout/deadline value. If the method (acquire) happens to + // recursively call itself the deadline should retain its value. + if deadline == nil && p.acquireTimeout > 0 { + tmp := time.Now().Add(p.acquireTimeout) + deadline = &tmp + } + + // Make sure the deadline (if it is) has not passed yet + if p.deadlinePassed(deadline) { + return nil, ErrAcquireTimeout + } + + // If there is a deadline then start a timeout timer + var timer *time.Timer + if deadline != nil { + timer = time.AfterFunc(deadline.Sub(time.Now()), func() { + p.cond.Broadcast() + }) + defer timer.Stop() + } + + // No connections are available, but we can create more + if len(p.allConnections)+p.inProgressConnects < p.maxConnections { + // Create a new connection. + // Careful here: createConnectionUnlocked() removes the current lock, + // creates a connection and then locks it back. + c, err := p.createConnectionUnlocked() + if err != nil { + return nil, err + } + c.poolResetCount = p.resetCount + p.allConnections = append(p.allConnections, c) + return c, nil + } + // All connections are in use and we cannot create more + if p.logLevel >= LogLevelWarn { + p.logger.Warn("All connections in pool are busy - waiting...") + } + + // Wait until there is an available connection OR room to create a new connection + for len(p.availableConnections) == 0 && len(p.allConnections)+p.inProgressConnects == p.maxConnections { + if p.deadlinePassed(deadline) { + return nil, ErrAcquireTimeout + } + p.cond.Wait() + } + + // Stop the timer so that we do not spawn it on every acquire call. + if timer != nil { + timer.Stop() + } + return p.acquire(deadline) +} + +// Release gives up use of a connection. +func (p *ConnPool) Release(conn *Conn) { + if conn.TxStatus != 'I' { + conn.Exec("rollback") + } + + if len(conn.channels) > 0 { + if err := conn.Unlisten("*"); err != nil { + conn.die(err) + } + conn.channels = make(map[string]struct{}) + } + conn.notifications = nil + + p.cond.L.Lock() + + if conn.poolResetCount != p.resetCount { + conn.Close() + p.cond.L.Unlock() + p.cond.Signal() + return + } + + if conn.IsAlive() { + p.availableConnections = append(p.availableConnections, conn) + } else { + p.removeFromAllConnections(conn) + } + p.cond.L.Unlock() + p.cond.Signal() +} + +// removeFromAllConnections Removes the given connection from the list. +// It returns true if the connection was found and removed or false otherwise. +func (p *ConnPool) removeFromAllConnections(conn *Conn) bool { + for i, c := range p.allConnections { + if conn == c { + p.allConnections = append(p.allConnections[:i], p.allConnections[i+1:]...) + return true + } + } + return false +} + +// Close ends the use of a connection pool. 
It prevents any new connections +// from being acquired, waits until all acquired connections are released, +// then closes all underlying connections. +func (p *ConnPool) Close() { + p.cond.L.Lock() + defer p.cond.L.Unlock() + + p.closed = true + + // Wait until all connections are released + if len(p.availableConnections) != len(p.allConnections) { + for len(p.availableConnections) != len(p.allConnections) { + p.cond.Wait() + } + } + + for _, c := range p.allConnections { + _ = c.Close() + } +} + +// Reset closes all open connections, but leaves the pool open. It is intended +// for use when an error is detected that would disrupt all connections (such as +// a network interruption or a server state change). +// +// It is safe to reset a pool while connections are checked out. Those +// connections will be closed when they are returned to the pool. +func (p *ConnPool) Reset() { + p.cond.L.Lock() + defer p.cond.L.Unlock() + + p.resetCount++ + p.allConnections = p.allConnections[0:0] + + for _, conn := range p.availableConnections { + conn.Close() + } + + p.availableConnections = p.availableConnections[0:0] +} + +// invalidateAcquired causes all acquired connections to be closed when released. +// The pool must already be locked. +func (p *ConnPool) invalidateAcquired() { + p.resetCount++ + + for _, c := range p.availableConnections { + c.poolResetCount = p.resetCount + } + + p.allConnections = p.allConnections[:len(p.availableConnections)] + copy(p.allConnections, p.availableConnections) +} + +// Stat returns connection pool statistics +func (p *ConnPool) Stat() (s ConnPoolStat) { + p.cond.L.Lock() + defer p.cond.L.Unlock() + + s.MaxConnections = p.maxConnections + s.CurrentConnections = len(p.allConnections) + s.AvailableConnections = len(p.availableConnections) + return +} + +func (p *ConnPool) createConnection() (*Conn, error) { + c, err := connect(p.config, p.pgTypes, p.pgsqlAfInet, p.pgsqlAfInet6) + if err != nil { + return nil, err + } + return p.afterConnectionCreated(c) +} + +// createConnectionUnlocked Removes the current lock, creates a new connection, and +// then locks it back. +// Here is the point: lets say our pool dialer's OpenTimeout is set to 3 seconds. +// And we have a pool with 20 connections in it, and we try to acquire them all at +// startup. +// If it happens that the remote server is not accessible, then the first connection +// in the pool blocks all the others for 3 secs, before it gets the timeout. Then +// connection #2 holds the lock and locks everything for the next 3 secs until it +// gets OpenTimeout err, etc. And the very last 20th connection will fail only after +// 3 * 20 = 60 secs. +// To avoid this we put Connect(p.config) outside of the lock (it is thread safe) +// what would allow us to make all the 20 connection in parallel (more or less). +func (p *ConnPool) createConnectionUnlocked() (*Conn, error) { + p.inProgressConnects++ + p.cond.L.Unlock() + c, err := Connect(p.config) + p.cond.L.Lock() + p.inProgressConnects-- + + if err != nil { + return nil, err + } + return p.afterConnectionCreated(c) +} + +// afterConnectionCreated executes (if it is) afterConnect() callback and prepares +// all the known statements for the new connection. 
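+//
+// This is where the ConnPoolConfig.AfterConnect hook runs; a minimal sketch
+// of such a hook (connConfig and the search_path value are hypothetical):
+//
+//	config := pgx.ConnPoolConfig{
+//		ConnConfig: connConfig,
+//		AfterConnect: func(conn *pgx.Conn) error {
+//			_, err := conn.Exec("set search_path = my_app_schema")
+//			return err
+//		},
+//	}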
+func (p *ConnPool) afterConnectionCreated(c *Conn) (*Conn, error) { + p.pgTypes = c.PgTypes + p.pgsqlAfInet = c.pgsqlAfInet + p.pgsqlAfInet6 = c.pgsqlAfInet6 + + if p.afterConnect != nil { + err := p.afterConnect(c) + if err != nil { + c.die(err) + return nil, err + } + } + + for _, ps := range p.preparedStatements { + if _, err := c.Prepare(ps.Name, ps.SQL); err != nil { + c.die(err) + return nil, err + } + } + + return c, nil +} + +// Exec acquires a connection, delegates the call to that connection, and releases the connection +func (p *ConnPool) Exec(sql string, arguments ...interface{}) (commandTag CommandTag, err error) { + var c *Conn + if c, err = p.Acquire(); err != nil { + return + } + defer p.Release(c) + + return c.Exec(sql, arguments...) +} + +// Query acquires a connection and delegates the call to that connection. When +// *Rows are closed, the connection is released automatically. +func (p *ConnPool) Query(sql string, args ...interface{}) (*Rows, error) { + c, err := p.Acquire() + if err != nil { + // Because checking for errors can be deferred to the *Rows, build one with the error + return &Rows{closed: true, err: err}, err + } + + rows, err := c.Query(sql, args...) + if err != nil { + p.Release(c) + return rows, err + } + + rows.AfterClose(p.rowsAfterClose) + + return rows, nil +} + +// QueryRow acquires a connection and delegates the call to that connection. The +// connection is released automatically after Scan is called on the returned +// *Row. +func (p *ConnPool) QueryRow(sql string, args ...interface{}) *Row { + rows, _ := p.Query(sql, args...) + return (*Row)(rows) +} + +// Begin acquires a connection and begins a transaction on it. When the +// transaction is closed the connection will be automatically released. +func (p *ConnPool) Begin() (*Tx, error) { + return p.BeginIso("") +} + +// Prepare creates a prepared statement on a connection in the pool to test the +// statement is valid. If it succeeds all connections accessed through the pool +// will have the statement available. +// +// Prepare creates a prepared statement with name and sql. sql can contain +// placeholders for bound parameters. These placeholders are referenced +// positional as $1, $2, etc. +// +// Prepare is idempotent; i.e. it is safe to call Prepare multiple times with +// the same name and sql arguments. This allows a code path to Prepare and +// Query/Exec/PrepareEx without concern for if the statement has already been prepared. +func (p *ConnPool) Prepare(name, sql string) (*PreparedStatement, error) { + return p.PrepareEx(name, sql, nil) +} + +// PrepareEx creates a prepared statement on a connection in the pool to test the +// statement is valid. If it succeeds all connections accessed through the pool +// will have the statement available. +// +// PrepareEx creates a prepared statement with name and sql. sql can contain placeholders +// for bound parameters. These placeholders are referenced positional as $1, $2, etc. +// It defers from Prepare as it allows additional options (such as parameter OIDs) to be passed via struct +// +// PrepareEx is idempotent; i.e. it is safe to call PrepareEx multiple times with the same +// name and sql arguments. This allows a code path to PrepareEx and Query/Exec/Prepare without +// concern for if the statement has already been prepared. 
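+//
+// Once prepared, the statement can be executed through the pool by name,
+// e.g. (a minimal sketch; the statement name and SQL are illustrative):
+//
+//	if _, err := pool.Prepare("getWidget", "select name from widgets where id=$1"); err != nil {
+//		// handle error
+//	}
+//	var name string
+//	err := pool.QueryRow("getWidget", 42).Scan(&name)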
+func (p *ConnPool) PrepareEx(name, sql string, opts *PrepareExOptions) (*PreparedStatement, error) { + p.cond.L.Lock() + defer p.cond.L.Unlock() + + if ps, ok := p.preparedStatements[name]; ok && ps.SQL == sql { + return ps, nil + } + + c, err := p.acquire(nil) + if err != nil { + return nil, err + } + + p.availableConnections = append(p.availableConnections, c) + + // Double check that the statement was not prepared by someone else + // while we were acquiring the connection (since acquire is not fully + // blocking now, see createConnectionUnlocked()) + if ps, ok := p.preparedStatements[name]; ok && ps.SQL == sql { + return ps, nil + } + + ps, err := c.PrepareEx(name, sql, opts) + if err != nil { + return nil, err + } + + for _, c := range p.availableConnections { + _, err := c.PrepareEx(name, sql, opts) + if err != nil { + return nil, err + } + } + + p.invalidateAcquired() + p.preparedStatements[name] = ps + + return ps, err +} + +// Deallocate releases a prepared statement from all connections in the pool. +func (p *ConnPool) Deallocate(name string) (err error) { + p.cond.L.Lock() + defer p.cond.L.Unlock() + + for _, c := range p.availableConnections { + if err := c.Deallocate(name); err != nil { + return err + } + } + + p.invalidateAcquired() + delete(p.preparedStatements, name) + + return nil +} + +// BeginIso acquires a connection and begins a transaction in isolation mode iso +// on it. When the transaction is closed the connection will be automatically +// released. +func (p *ConnPool) BeginIso(iso string) (*Tx, error) { + for { + c, err := p.Acquire() + if err != nil { + return nil, err + } + + tx, err := c.BeginIso(iso) + if err != nil { + alive := c.IsAlive() + p.Release(c) + + // If connection is still alive then the error is not something trying + // again on a new connection would fix, so just return the error. But + // if the connection is dead try to acquire a new connection and try + // again. + if alive { + return nil, err + } + continue + } + + tx.AfterClose(p.txAfterClose) + return tx, nil + } +} + +// Deprecated. Use CopyFrom instead. CopyTo acquires a connection, delegates the +// call to that connection, and releases the connection. +func (p *ConnPool) CopyTo(tableName string, columnNames []string, rowSrc CopyToSource) (int, error) { + c, err := p.Acquire() + if err != nil { + return 0, err + } + defer p.Release(c) + + return c.CopyTo(tableName, columnNames, rowSrc) +} + +// CopyFrom acquires a connection, delegates the call to that connection, and +// releases the connection. +func (p *ConnPool) CopyFrom(tableName Identifier, columnNames []string, rowSrc CopyFromSource) (int, error) { + c, err := p.Acquire() + if err != nil { + return 0, err + } + defer p.Release(c) + + return c.CopyFrom(tableName, columnNames, rowSrc) +} diff --git a/vendor/github.com/jackc/pgx/copy_from.go b/vendor/github.com/jackc/pgx/copy_from.go new file mode 100644 index 0000000..1f8a230 --- /dev/null +++ b/vendor/github.com/jackc/pgx/copy_from.go @@ -0,0 +1,241 @@ +package pgx + +import ( + "bytes" + "fmt" +) + +// CopyFromRows returns a CopyFromSource interface over the provided rows slice +// making it usable by *Conn.CopyFrom. 
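+//
+// e.g. (a minimal sketch; the table and columns are hypothetical):
+//
+//	rows := [][]interface{}{
+//		{"widget", int32(7)},
+//		{"gadget", int32(9)},
+//	}
+//	copyCount, err := conn.CopyFrom(pgx.Identifier{"widgets"}, []string{"name", "weight"}, pgx.CopyFromRows(rows))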
+func CopyFromRows(rows [][]interface{}) CopyFromSource { + return ©FromRows{rows: rows, idx: -1} +} + +type copyFromRows struct { + rows [][]interface{} + idx int +} + +func (ctr *copyFromRows) Next() bool { + ctr.idx++ + return ctr.idx < len(ctr.rows) +} + +func (ctr *copyFromRows) Values() ([]interface{}, error) { + return ctr.rows[ctr.idx], nil +} + +func (ctr *copyFromRows) Err() error { + return nil +} + +// CopyFromSource is the interface used by *Conn.CopyFrom as the source for copy data. +type CopyFromSource interface { + // Next returns true if there is another row and makes the next row data + // available to Values(). When there are no more rows available or an error + // has occurred it returns false. + Next() bool + + // Values returns the values for the current row. + Values() ([]interface{}, error) + + // Err returns any error that has been encountered by the CopyFromSource. If + // this is not nil *Conn.CopyFrom will abort the copy. + Err() error +} + +type copyFrom struct { + conn *Conn + tableName Identifier + columnNames []string + rowSrc CopyFromSource + readerErrChan chan error +} + +func (ct *copyFrom) readUntilReadyForQuery() { + for { + t, r, err := ct.conn.rxMsg() + if err != nil { + ct.readerErrChan <- err + close(ct.readerErrChan) + return + } + + switch t { + case readyForQuery: + ct.conn.rxReadyForQuery(r) + close(ct.readerErrChan) + return + case commandComplete: + case errorResponse: + ct.readerErrChan <- ct.conn.rxErrorResponse(r) + default: + err = ct.conn.processContextFreeMsg(t, r) + if err != nil { + ct.readerErrChan <- ct.conn.processContextFreeMsg(t, r) + } + } + } +} + +func (ct *copyFrom) waitForReaderDone() error { + var err error + for err = range ct.readerErrChan { + } + return err +} + +func (ct *copyFrom) run() (int, error) { + quotedTableName := ct.tableName.Sanitize() + buf := &bytes.Buffer{} + for i, cn := range ct.columnNames { + if i != 0 { + buf.WriteString(", ") + } + buf.WriteString(quoteIdentifier(cn)) + } + quotedColumnNames := buf.String() + + ps, err := ct.conn.Prepare("", fmt.Sprintf("select %s from %s", quotedColumnNames, quotedTableName)) + if err != nil { + return 0, err + } + + err = ct.conn.sendSimpleQuery(fmt.Sprintf("copy %s ( %s ) from stdin binary;", quotedTableName, quotedColumnNames)) + if err != nil { + return 0, err + } + + err = ct.conn.readUntilCopyInResponse() + if err != nil { + return 0, err + } + + go ct.readUntilReadyForQuery() + defer ct.waitForReaderDone() + + wbuf := newWriteBuf(ct.conn, copyData) + + wbuf.WriteBytes([]byte("PGCOPY\n\377\r\n\000")) + wbuf.WriteInt32(0) + wbuf.WriteInt32(0) + + var sentCount int + + for ct.rowSrc.Next() { + select { + case err = <-ct.readerErrChan: + return 0, err + default: + } + + if len(wbuf.buf) > 65536 { + wbuf.closeMsg() + _, err = ct.conn.conn.Write(wbuf.buf) + if err != nil { + ct.conn.die(err) + return 0, err + } + + // Directly manipulate wbuf to reset to reuse the same buffer + wbuf.buf = wbuf.buf[0:5] + wbuf.buf[0] = copyData + wbuf.sizeIdx = 1 + } + + sentCount++ + + values, err := ct.rowSrc.Values() + if err != nil { + ct.cancelCopyIn() + return 0, err + } + if len(values) != len(ct.columnNames) { + ct.cancelCopyIn() + return 0, fmt.Errorf("expected %d values, got %d values", len(ct.columnNames), len(values)) + } + + wbuf.WriteInt16(int16(len(ct.columnNames))) + for i, val := range values { + err = Encode(wbuf, ps.FieldDescriptions[i].DataType, val) + if err != nil { + ct.cancelCopyIn() + return 0, err + } + + } + } + + if ct.rowSrc.Err() != nil { + 
ct.cancelCopyIn() + return 0, ct.rowSrc.Err() + } + + wbuf.WriteInt16(-1) // terminate the copy stream + + wbuf.startMsg(copyDone) + wbuf.closeMsg() + _, err = ct.conn.conn.Write(wbuf.buf) + if err != nil { + ct.conn.die(err) + return 0, err + } + + err = ct.waitForReaderDone() + if err != nil { + return 0, err + } + return sentCount, nil +} + +func (c *Conn) readUntilCopyInResponse() error { + for { + var t byte + var r *msgReader + t, r, err := c.rxMsg() + if err != nil { + return err + } + + switch t { + case copyInResponse: + return nil + default: + err = c.processContextFreeMsg(t, r) + if err != nil { + return err + } + } + } +} + +func (ct *copyFrom) cancelCopyIn() error { + wbuf := newWriteBuf(ct.conn, copyFail) + wbuf.WriteCString("client error: abort") + wbuf.closeMsg() + _, err := ct.conn.conn.Write(wbuf.buf) + if err != nil { + ct.conn.die(err) + return err + } + + return nil +} + +// CopyFrom uses the PostgreSQL copy protocol to perform bulk data insertion. +// It returns the number of rows copied and an error. +// +// CopyFrom requires all values use the binary format. Almost all types +// implemented by pgx use the binary format by default. Types implementing +// Encoder can only be used if they encode to the binary format. +func (c *Conn) CopyFrom(tableName Identifier, columnNames []string, rowSrc CopyFromSource) (int, error) { + ct := ©From{ + conn: c, + tableName: tableName, + columnNames: columnNames, + rowSrc: rowSrc, + readerErrChan: make(chan error), + } + + return ct.run() +} diff --git a/vendor/github.com/jackc/pgx/copy_to.go b/vendor/github.com/jackc/pgx/copy_to.go new file mode 100644 index 0000000..229e9a4 --- /dev/null +++ b/vendor/github.com/jackc/pgx/copy_to.go @@ -0,0 +1,222 @@ +package pgx + +import ( + "bytes" + "fmt" +) + +// Deprecated. Use CopyFromRows instead. CopyToRows returns a CopyToSource +// interface over the provided rows slice making it usable by *Conn.CopyTo. +func CopyToRows(rows [][]interface{}) CopyToSource { + return ©ToRows{rows: rows, idx: -1} +} + +type copyToRows struct { + rows [][]interface{} + idx int +} + +func (ctr *copyToRows) Next() bool { + ctr.idx++ + return ctr.idx < len(ctr.rows) +} + +func (ctr *copyToRows) Values() ([]interface{}, error) { + return ctr.rows[ctr.idx], nil +} + +func (ctr *copyToRows) Err() error { + return nil +} + +// Deprecated. Use CopyFromSource instead. CopyToSource is the interface used by +// *Conn.CopyTo as the source for copy data. +type CopyToSource interface { + // Next returns true if there is another row and makes the next row data + // available to Values(). When there are no more rows available or an error + // has occurred it returns false. + Next() bool + + // Values returns the values for the current row. + Values() ([]interface{}, error) + + // Err returns any error that has been encountered by the CopyToSource. If + // this is not nil *Conn.CopyTo will abort the copy. 
+	Err() error
+}
+
+type copyTo struct {
+	conn          *Conn
+	tableName     string
+	columnNames   []string
+	rowSrc        CopyToSource
+	readerErrChan chan error
+}
+
+func (ct *copyTo) readUntilReadyForQuery() {
+	for {
+		t, r, err := ct.conn.rxMsg()
+		if err != nil {
+			ct.readerErrChan <- err
+			close(ct.readerErrChan)
+			return
+		}
+
+		switch t {
+		case readyForQuery:
+			ct.conn.rxReadyForQuery(r)
+			close(ct.readerErrChan)
+			return
+		case commandComplete:
+		case errorResponse:
+			ct.readerErrChan <- ct.conn.rxErrorResponse(r)
+		default:
+			err = ct.conn.processContextFreeMsg(t, r)
+			if err != nil {
+				// Send the error we already have rather than processing the
+				// same message a second time.
+				ct.readerErrChan <- err
+			}
+		}
+	}
+}
+
+func (ct *copyTo) waitForReaderDone() error {
+	var err error
+	for err = range ct.readerErrChan {
+	}
+	return err
+}
+
+func (ct *copyTo) run() (int, error) {
+	quotedTableName := quoteIdentifier(ct.tableName)
+	buf := &bytes.Buffer{}
+	for i, cn := range ct.columnNames {
+		if i != 0 {
+			buf.WriteString(", ")
+		}
+		buf.WriteString(quoteIdentifier(cn))
+	}
+	quotedColumnNames := buf.String()
+
+	ps, err := ct.conn.Prepare("", fmt.Sprintf("select %s from %s", quotedColumnNames, quotedTableName))
+	if err != nil {
+		return 0, err
+	}
+
+	err = ct.conn.sendSimpleQuery(fmt.Sprintf("copy %s ( %s ) from stdin binary;", quotedTableName, quotedColumnNames))
+	if err != nil {
+		return 0, err
+	}
+
+	err = ct.conn.readUntilCopyInResponse()
+	if err != nil {
+		return 0, err
+	}
+
+	go ct.readUntilReadyForQuery()
+	defer ct.waitForReaderDone()
+
+	wbuf := newWriteBuf(ct.conn, copyData)
+
+	wbuf.WriteBytes([]byte("PGCOPY\n\377\r\n\000"))
+	wbuf.WriteInt32(0)
+	wbuf.WriteInt32(0)
+
+	var sentCount int
+
+	for ct.rowSrc.Next() {
+		select {
+		case err = <-ct.readerErrChan:
+			return 0, err
+		default:
+		}
+
+		if len(wbuf.buf) > 65536 {
+			wbuf.closeMsg()
+			_, err = ct.conn.conn.Write(wbuf.buf)
+			if err != nil {
+				ct.conn.die(err)
+				return 0, err
+			}
+
+			// Directly manipulate wbuf to reset to reuse the same buffer
+			wbuf.buf = wbuf.buf[0:5]
+			wbuf.buf[0] = copyData
+			wbuf.sizeIdx = 1
+		}
+
+		sentCount++
+
+		values, err := ct.rowSrc.Values()
+		if err != nil {
+			ct.cancelCopyIn()
+			return 0, err
+		}
+		if len(values) != len(ct.columnNames) {
+			ct.cancelCopyIn()
+			return 0, fmt.Errorf("expected %d values, got %d values", len(ct.columnNames), len(values))
+		}
+
+		wbuf.WriteInt16(int16(len(ct.columnNames)))
+		for i, val := range values {
+			err = Encode(wbuf, ps.FieldDescriptions[i].DataType, val)
+			if err != nil {
+				ct.cancelCopyIn()
+				return 0, err
+			}
+		}
+	}
+
+	if ct.rowSrc.Err() != nil {
+		ct.cancelCopyIn()
+		return 0, ct.rowSrc.Err()
+	}
+
+	wbuf.WriteInt16(-1) // terminate the copy stream
+
+	wbuf.startMsg(copyDone)
+	wbuf.closeMsg()
+	_, err = ct.conn.conn.Write(wbuf.buf)
+	if err != nil {
+		ct.conn.die(err)
+		return 0, err
+	}
+
+	err = ct.waitForReaderDone()
+	if err != nil {
+		return 0, err
+	}
+	return sentCount, nil
+}
+
+func (ct *copyTo) cancelCopyIn() error {
+	wbuf := newWriteBuf(ct.conn, copyFail)
+	wbuf.WriteCString("client error: abort")
+	wbuf.closeMsg()
+	_, err := ct.conn.conn.Write(wbuf.buf)
+	if err != nil {
+		ct.conn.die(err)
+		return err
+	}
+
+	return nil
+}
+
+// Deprecated. Use CopyFrom instead. CopyTo uses the PostgreSQL copy protocol to
+// perform bulk data insertion. It returns the number of rows copied and an
+// error.
+//
+// CopyTo requires all values use the binary format. Almost all types
+// implemented by pgx use the binary format by default. Types implementing
+// Encoder can only be used if they encode to the binary format.
+func (c *Conn) CopyTo(tableName string, columnNames []string, rowSrc CopyToSource) (int, error) {
+	ct := &copyTo{
+		conn:          c,
+		tableName:     tableName,
+		columnNames:   columnNames,
+		rowSrc:        rowSrc,
+		readerErrChan: make(chan error),
+	}
+
+	return ct.run()
+}
diff --git a/vendor/github.com/jackc/pgx/doc.go b/vendor/github.com/jackc/pgx/doc.go
new file mode 100644
index 0000000..f527d11
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/doc.go
@@ -0,0 +1,265 @@
+// Package pgx is a PostgreSQL database driver.
+/*
+pgx provides lower level access to PostgreSQL than the standard database/sql.
+It remains as similar to the database/sql interface as possible while
+providing better speed and access to PostgreSQL specific features. Import
+github.com/jackc/pgx/stdlib to use pgx as a database/sql compatible driver.
+
+Query Interface
+
+pgx implements Query and Scan in the familiar database/sql style.
+
+	var sum int32
+
+	// Send the query to the server. The returned rows MUST be closed
+	// before conn can be used again.
+	rows, err := conn.Query("select generate_series(1,$1)", 10)
+	if err != nil {
+		return err
+	}
+
+	// rows.Close is called by rows.Next when all rows are read
+	// or an error occurs in Next or Scan. So it may optionally be
+	// omitted if nothing in the rows.Next loop can panic. It is
+	// safe to close rows multiple times.
+	defer rows.Close()
+
+	// Iterate through the result set
+	for rows.Next() {
+		var n int32
+		err = rows.Scan(&n)
+		if err != nil {
+			return err
+		}
+		sum += n
+	}
+
+	// Any errors encountered by rows.Next or rows.Scan will be returned here
+	if rows.Err() != nil {
+		return rows.Err()
+	}
+
+	// No errors found - do something with sum
+
+pgx also implements QueryRow in the same style as database/sql.
+
+	var name string
+	var weight int64
+	err := conn.QueryRow("select name, weight from widgets where id=$1", 42).Scan(&name, &weight)
+	if err != nil {
+		return err
+	}
+
+Use Exec to execute a query that does not return a result set.
+
+	commandTag, err := conn.Exec("delete from widgets where id=$1", 42)
+	if err != nil {
+		return err
+	}
+	if commandTag.RowsAffected() != 1 {
+		return errors.New("No row found to delete")
+	}
+
+Connection Pool
+
+Connection pool usage is explicit and configurable. In pgx, a connection can
+be created and managed directly, or a connection pool with a configurable
+maximum connections can be used. Also, the connection pool offers an after
+connect hook that allows every connection to be automatically setup before
+being made available in the connection pool. This is especially useful to
+ensure all connections have the same prepared statements available or to
+change any other connection settings.
+
+It delegates Query, QueryRow, Exec, and Begin functions to an automatically
+checked out and released connection so you can avoid manually acquiring and
+releasing connections when you do not need that level of control.
+
+	var name string
+	var weight int64
+	err := pool.QueryRow("select name, weight from widgets where id=$1", 42).Scan(&name, &weight)
+	if err != nil {
+		return err
+	}
+
+Base Type Mapping
+
+pgx maps between all common base types directly between Go and PostgreSQL. In
+particular:
+
+	Go           PostgreSQL
+	-----------------------
+	string       varchar
+	             text
+
+	// Integers are automatically converted to any other integer type if
+	// it can be done without overflow or underflow.
+	int8
+	int16        smallint
+	int32        int
+	int64        bigint
+	int
+	uint8
+	uint16
+	uint32
+	uint64
+	uint
+
+	// Floats are strict and do not automatically convert like integers.
+	float32      float4
+	float64      float8
+
+	time.Time    date
+	             timestamp
+	             timestamptz
+
+	[]byte       bytea
+
+
+Null Mapping
+
+pgx can map nulls in two ways. The first is Null* types that have a data field
+and a valid field. They work in a similar fashion to database/sql. The second
+is to use a pointer to a pointer.
+
+	var foo pgx.NullString
+	var bar *string
+	err := conn.QueryRow("select foo, bar from widgets where id=$1", 42).Scan(&foo, &bar)
+	if err != nil {
+		return err
+	}
+
+Array Mapping
+
+pgx maps between int16, int32, int64, float32, float64, and string Go slices
+and the equivalent PostgreSQL array type. Go slices of native types do not
+support nulls, so if a PostgreSQL array that contains a null is read into a
+native Go slice an error will occur.
+
+Hstore Mapping
+
+pgx includes an Hstore type and a NullHstore type. Hstore is simply a
+map[string]string and is preferred when the hstore contains no nulls. NullHstore
+follows the Null* pattern and supports null values.
+
+JSON and JSONB Mapping
+
+pgx includes built-in support to marshal and unmarshal between Go types and
+the PostgreSQL JSON and JSONB.
+
+Inet and Cidr Mapping
+
+pgx encodes from net.IPNet to and from inet and cidr PostgreSQL types. In
+addition, as a convenience pgx will encode from a net.IP; it will assume a /32
+netmask for IPv4 and a /128 for IPv6.
+
+Custom Type Support
+
+pgx includes support for the common data types like integers, floats, strings,
+dates, and times that have direct mappings between Go and SQL. Support can be
+added for additional types like point, hstore, numeric, etc. that do not have
+direct mappings in Go by the types implementing ScannerPgx and Encoder.
+
+Custom types can support text or binary formats. Binary format can provide a
+large performance increase. The natural place for deciding the format for a
+value would be in ScannerPgx as it is responsible for decoding the returned
+data. However, that is impossible as the query has already been sent by the time
+the ScannerPgx is invoked. The solution to this is the global
+DefaultTypeFormats. If a custom type prefers binary format it should register it
+there.
+
+	pgx.DefaultTypeFormats["point"] = pgx.BinaryFormatCode
+
+Note that the type is referred to by name, not by OID. This is because custom
+PostgreSQL types like hstore will have different OIDs on different servers. When
+pgx establishes a connection it queries the pg_type table for all types. It then
+matches the names in DefaultTypeFormats with the returned OIDs and stores it in
+Conn.PgTypes.
+
+See example_custom_type_test.go for an example of a custom type for the
+PostgreSQL point type.
+
+pgx also includes support for custom types implementing the database/sql.Scanner
+and database/sql/driver.Valuer interfaces.
+
+Raw Bytes Mapping
+
+[]byte passed as arguments to Query, QueryRow, and Exec are passed unmodified
+to PostgreSQL. In like manner, a *[]byte passed to Scan will be filled with
+the raw bytes returned by PostgreSQL. This can be especially useful for reading
+varchar, text, json, and jsonb values directly into a []byte and avoiding the
+type conversion from string.
+
+Transactions
+
+Transactions are started by calling Begin or BeginIso. The BeginIso variant
+creates a transaction with a specified isolation level.
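+
+For example, a serializable transaction can be started by passing the
+isolation level string to BeginIso (a sketch; any isolation level string
+PostgreSQL accepts may be used):
+
+	tx, err := conn.BeginIso("serializable")
+	if err != nil {
+		return err
+	}
+
+A plain Begin looks like: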
+
+	tx, err := conn.Begin()
+	if err != nil {
+		return err
+	}
+	// Rollback is safe to call even if the tx is already closed, so if
+	// the tx commits successfully, this is a no-op
+	defer tx.Rollback()
+
+	_, err = tx.Exec("insert into foo(id) values (1)")
+	if err != nil {
+		return err
+	}
+
+	err = tx.Commit()
+	if err != nil {
+		return err
+	}
+
+Copy Protocol
+
+Use CopyFrom to efficiently insert multiple rows at a time using the PostgreSQL
+copy protocol. CopyFrom accepts a CopyFromSource interface. If the data is already
+in a [][]interface{} use CopyFromRows to wrap it in a CopyFromSource interface. Or
+implement CopyFromSource to avoid buffering the entire data set in memory.
+
+	rows := [][]interface{}{
+		{"John", "Smith", int32(36)},
+		{"Jane", "Doe", int32(29)},
+	}
+
+	copyCount, err := conn.CopyFrom(
+		pgx.Identifier{"people"},
+		[]string{"first_name", "last_name", "age"},
+		pgx.CopyFromRows(rows),
+	)
+
+CopyFrom can be faster than an insert with as few as 5 rows.
+
+Listen and Notify
+
+pgx can listen to the PostgreSQL notification system with the
+WaitForNotification function. It takes a maximum time to wait for a
+notification.
+
+	err := conn.Listen("channelname")
+	if err != nil {
+		return err
+	}
+
+	if notification, err := conn.WaitForNotification(time.Second); err == nil {
+		// do something with notification
+	}
+
+TLS
+
+The pgx ConnConfig struct has a TLSConfig field. If this field is
+nil, then TLS will be disabled. If it is present, then it will be used to
+configure the TLS connection. This allows total configuration of the TLS
+connection.
+
+Logging
+
+pgx defines a simple logger interface. Connections optionally accept a logger
+that satisfies this interface. The log15 package
+(http://gopkg.in/inconshreveable/log15.v2) satisfies this interface and it is
+simple to define adapters for other loggers. Set LogLevel to control logging
+verbosity.
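+
+As a minimal sketch (assuming nothing beyond the Logger interface defined in
+logger.go and the standard library log package), an adapter could look like:
+
+	type stdlibLogger struct{}
+
+	func (l stdlibLogger) Debug(msg string, ctx ...interface{}) { log.Println("debug", msg, ctx) }
+	func (l stdlibLogger) Info(msg string, ctx ...interface{})  { log.Println("info", msg, ctx) }
+	func (l stdlibLogger) Warn(msg string, ctx ...interface{})  { log.Println("warn", msg, ctx) }
+	func (l stdlibLogger) Error(msg string, ctx ...interface{}) { log.Println("error", msg, ctx) }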
+*/ +package pgx diff --git a/vendor/github.com/jackc/pgx/fastpath.go b/vendor/github.com/jackc/pgx/fastpath.go new file mode 100644 index 0000000..19b9878 --- /dev/null +++ b/vendor/github.com/jackc/pgx/fastpath.go @@ -0,0 +1,108 @@ +package pgx + +import ( + "encoding/binary" +) + +func newFastpath(cn *Conn) *fastpath { + return &fastpath{cn: cn, fns: make(map[string]Oid)} +} + +type fastpath struct { + cn *Conn + fns map[string]Oid +} + +func (f *fastpath) functionOID(name string) Oid { + return f.fns[name] +} + +func (f *fastpath) addFunction(name string, oid Oid) { + f.fns[name] = oid +} + +func (f *fastpath) addFunctions(rows *Rows) error { + for rows.Next() { + var name string + var oid Oid + if err := rows.Scan(&name, &oid); err != nil { + return err + } + f.addFunction(name, oid) + } + return rows.Err() +} + +type fpArg []byte + +func fpIntArg(n int32) fpArg { + res := make([]byte, 4) + binary.BigEndian.PutUint32(res, uint32(n)) + return res +} + +func fpInt64Arg(n int64) fpArg { + res := make([]byte, 8) + binary.BigEndian.PutUint64(res, uint64(n)) + return res +} + +func (f *fastpath) Call(oid Oid, args []fpArg) (res []byte, err error) { + wbuf := newWriteBuf(f.cn, 'F') // function call + wbuf.WriteInt32(int32(oid)) // function object id + wbuf.WriteInt16(1) // # of argument format codes + wbuf.WriteInt16(1) // format code: binary + wbuf.WriteInt16(int16(len(args))) // # of arguments + for _, arg := range args { + wbuf.WriteInt32(int32(len(arg))) // length of argument + wbuf.WriteBytes(arg) // argument value + } + wbuf.WriteInt16(1) // response format code (binary) + wbuf.closeMsg() + + if _, err := f.cn.conn.Write(wbuf.buf); err != nil { + return nil, err + } + + for { + var t byte + var r *msgReader + t, r, err = f.cn.rxMsg() + if err != nil { + return nil, err + } + switch t { + case 'V': // FunctionCallResponse + data := r.readBytes(r.readInt32()) + res = make([]byte, len(data)) + copy(res, data) + case 'Z': // Ready for query + f.cn.rxReadyForQuery(r) + // done + return + default: + if err := f.cn.processContextFreeMsg(t, r); err != nil { + return nil, err + } + } + } +} + +func (f *fastpath) CallFn(fn string, args []fpArg) ([]byte, error) { + return f.Call(f.functionOID(fn), args) +} + +func fpInt32(data []byte, err error) (int32, error) { + if err != nil { + return 0, err + } + n := int32(binary.BigEndian.Uint32(data)) + return n, nil +} + +func fpInt64(data []byte, err error) (int64, error) { + if err != nil { + return 0, err + } + return int64(binary.BigEndian.Uint64(data)), nil +} diff --git a/vendor/github.com/jackc/pgx/hstore.go b/vendor/github.com/jackc/pgx/hstore.go new file mode 100644 index 0000000..0ab9f77 --- /dev/null +++ b/vendor/github.com/jackc/pgx/hstore.go @@ -0,0 +1,222 @@ +package pgx + +import ( + "bytes" + "errors" + "fmt" + "unicode" + "unicode/utf8" +) + +const ( + hsPre = iota + hsKey + hsSep + hsVal + hsNul + hsNext +) + +type hstoreParser struct { + str string + pos int +} + +func newHSP(in string) *hstoreParser { + return &hstoreParser{ + pos: 0, + str: in, + } +} + +func (p *hstoreParser) Consume() (r rune, end bool) { + if p.pos >= len(p.str) { + end = true + return + } + r, w := utf8.DecodeRuneInString(p.str[p.pos:]) + p.pos += w + return +} + +func (p *hstoreParser) Peek() (r rune, end bool) { + if p.pos >= len(p.str) { + end = true + return + } + r, _ = utf8.DecodeRuneInString(p.str[p.pos:]) + return +} + +func parseHstoreToMap(s string) (m map[string]string, err error) { + keys, values, err := ParseHstore(s) + if err != nil { + return + } + 
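	// Fold the parallel key/value slices into a map; a NULL value is an
+	// error here (parseHstoreToNullHstore below preserves NULLs instead).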
+	m = make(map[string]string, len(keys))
+	for i, key := range keys {
+		if !values[i].Valid {
+			err = fmt.Errorf("key '%s' has NULL value", key)
+			m = nil
+			return
+		}
+		m[key] = values[i].String
+	}
+	return
+}
+
+func parseHstoreToNullHstore(s string) (store map[string]NullString, err error) {
+	keys, values, err := ParseHstore(s)
+	if err != nil {
+		return
+	}
+
+	store = make(map[string]NullString, len(keys))
+
+	for i, key := range keys {
+		store[key] = values[i]
+	}
+	return
+}
+
+// ParseHstore parses the string representation of an hstore column (the same
+// you would get from an ordinary SELECT) into two slices of keys and values. It
+// is used internally in the default parsing of hstores, but is exported for use
+// in handling custom data structures backed by an hstore column without the
+// overhead of creating a map[string]string
+func ParseHstore(s string) (k []string, v []NullString, err error) {
+	if s == "" {
+		return
+	}
+
+	buf := bytes.Buffer{}
+	keys := []string{}
+	values := []NullString{}
+	p := newHSP(s)
+
+	r, end := p.Consume()
+	state := hsPre
+
+	for !end {
+		switch state {
+		case hsPre:
+			if r == '"' {
+				state = hsKey
+			} else {
+				err = errors.New("String does not begin with \"")
+			}
+		case hsKey:
+			switch r {
+			case '"': //End of the key
+				if buf.Len() == 0 {
+					err = errors.New("Empty Key is invalid")
+				} else {
+					keys = append(keys, buf.String())
+					buf = bytes.Buffer{}
+					state = hsSep
+				}
+			case '\\': //Potential escaped character
+				n, end := p.Consume()
+				switch {
+				case end:
+					err = errors.New("Found EOS in key, expecting character or \"")
+				case n == '"', n == '\\':
+					buf.WriteRune(n)
+				default:
+					buf.WriteRune(r)
+					buf.WriteRune(n)
+				}
+			default: //Any other character
+				buf.WriteRune(r)
+			}
+		case hsSep:
+			if r == '=' {
+				r, end = p.Consume()
+				switch {
+				case end:
+					err = errors.New("Found EOS after '=', expecting '>'")
+				case r == '>':
+					r, end = p.Consume()
+					switch {
+					case end:
+						err = errors.New("Found EOS after '=>', expecting '\"' or 'NULL'")
+					case r == '"':
+						state = hsVal
+					case r == 'N':
+						state = hsNul
+					default:
+						err = fmt.Errorf("Invalid character '%c' after '=>', expecting '\"' or 'NULL'", r)
+					}
+				default:
+					err = fmt.Errorf("Invalid character after '=', expecting '>'")
+				}
+			} else {
+				err = fmt.Errorf("Invalid character '%c' after value, expecting '='", r)
+			}
+		case hsVal:
+			switch r {
+			case '"': //End of the value
+				values = append(values, NullString{String: buf.String(), Valid: true})
+				buf = bytes.Buffer{}
+				state = hsNext
+			case '\\': //Potential escaped character
+				n, end := p.Consume()
+				switch {
+				case end:
+					err = errors.New("Found EOS in value, expecting character or \"")
+				case n == '"', n == '\\':
+					buf.WriteRune(n)
+				default:
+					buf.WriteRune(r)
+					buf.WriteRune(n)
+				}
+			default: //Any other character
+				buf.WriteRune(r)
+			}
+		case hsNul:
+			nulBuf := make([]rune, 3)
+			nulBuf[0] = r
+			for i := 1; i < 3; i++ {
+				r, end = p.Consume()
+				if end {
+					err = errors.New("Found EOS in NULL value")
+					return
+				}
+				nulBuf[i] = r
+			}
+			if nulBuf[0] == 'U' && nulBuf[1] == 'L' && nulBuf[2] == 'L' {
+				values = append(values, NullString{String: "", Valid: false})
+				state = hsNext
+			} else {
+				err = fmt.Errorf("Invalid NULL value: 'N%s'", string(nulBuf))
+			}
+		case hsNext:
+			if r == ',' {
+				r, end = p.Consume()
+				switch {
+				case end:
+					err = errors.New("Found EOS after ',', expecting space")
+				case (unicode.IsSpace(r)):
+					r, end = p.Consume()
+					state = hsKey
+				default:
+					err = fmt.Errorf("Invalid character '%c' after ', ', expecting \"", r)
+				}
+
} else { + err = fmt.Errorf("Invalid character '%c' after value, expecting ','", r) + } + } + + if err != nil { + return + } + r, end = p.Consume() + } + if state != hsNext { + err = errors.New("Improperly formatted hstore") + return + } + k = keys + v = values + return +} diff --git a/vendor/github.com/jackc/pgx/large_objects.go b/vendor/github.com/jackc/pgx/large_objects.go new file mode 100644 index 0000000..a4922ef --- /dev/null +++ b/vendor/github.com/jackc/pgx/large_objects.go @@ -0,0 +1,147 @@ +package pgx + +import ( + "io" +) + +// LargeObjects is a structure used to access the large objects API. It is only +// valid within the transaction where it was created. +// +// For more details see: http://www.postgresql.org/docs/current/static/largeobjects.html +type LargeObjects struct { + // Has64 is true if the server is capable of working with 64-bit numbers + Has64 bool + fp *fastpath +} + +const largeObjectFns = `select proname, oid from pg_catalog.pg_proc +where proname in ( +'lo_open', +'lo_close', +'lo_create', +'lo_unlink', +'lo_lseek', +'lo_lseek64', +'lo_tell', +'lo_tell64', +'lo_truncate', +'lo_truncate64', +'loread', +'lowrite') +and pronamespace = (select oid from pg_catalog.pg_namespace where nspname = 'pg_catalog')` + +// LargeObjects returns a LargeObjects instance for the transaction. +func (tx *Tx) LargeObjects() (*LargeObjects, error) { + if tx.conn.fp == nil { + tx.conn.fp = newFastpath(tx.conn) + } + if _, exists := tx.conn.fp.fns["lo_open"]; !exists { + res, err := tx.Query(largeObjectFns) + if err != nil { + return nil, err + } + if err := tx.conn.fp.addFunctions(res); err != nil { + return nil, err + } + } + + lo := &LargeObjects{fp: tx.conn.fp} + _, lo.Has64 = lo.fp.fns["lo_lseek64"] + + return lo, nil +} + +type LargeObjectMode int32 + +const ( + LargeObjectModeWrite LargeObjectMode = 0x20000 + LargeObjectModeRead LargeObjectMode = 0x40000 +) + +// Create creates a new large object. If id is zero, the server assigns an +// unused OID. +func (o *LargeObjects) Create(id Oid) (Oid, error) { + newOid, err := fpInt32(o.fp.CallFn("lo_create", []fpArg{fpIntArg(int32(id))})) + return Oid(newOid), err +} + +// Open opens an existing large object with the given mode. +func (o *LargeObjects) Open(oid Oid, mode LargeObjectMode) (*LargeObject, error) { + fd, err := fpInt32(o.fp.CallFn("lo_open", []fpArg{fpIntArg(int32(oid)), fpIntArg(int32(mode))})) + return &LargeObject{fd: fd, lo: o}, err +} + +// Unlink removes a large object from the database. +func (o *LargeObjects) Unlink(oid Oid) error { + _, err := o.fp.CallFn("lo_unlink", []fpArg{fpIntArg(int32(oid))}) + return err +} + +// A LargeObject is a large object stored on the server. It is only valid within +// the transaction that it was initialized in. It implements these interfaces: +// +// io.Writer +// io.Reader +// io.Seeker +// io.Closer +type LargeObject struct { + fd int32 + lo *LargeObjects +} + +// Write writes p to the large object and returns the number of bytes written +// and an error if not all of p was written. +func (o *LargeObject) Write(p []byte) (int, error) { + n, err := fpInt32(o.lo.fp.CallFn("lowrite", []fpArg{fpIntArg(o.fd), p})) + return int(n), err +} + +// Read reads up to len(p) bytes into p returning the number of bytes read. 
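+// If the server returns fewer than len(p) bytes, err is set to io.EOF.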
+func (o *LargeObject) Read(p []byte) (int, error) {
+	res, err := o.lo.fp.CallFn("loread", []fpArg{fpIntArg(o.fd), fpIntArg(int32(len(p)))})
+	if len(res) < len(p) {
+		err = io.EOF
+	}
+	return copy(p, res), err
+}
+
+// Seek moves the current location pointer to the new location specified by offset.
+func (o *LargeObject) Seek(offset int64, whence int) (n int64, err error) {
+	if o.lo.Has64 {
+		n, err = fpInt64(o.lo.fp.CallFn("lo_lseek64", []fpArg{fpIntArg(o.fd), fpInt64Arg(offset), fpIntArg(int32(whence))}))
+	} else {
+		var n32 int32
+		n32, err = fpInt32(o.lo.fp.CallFn("lo_lseek", []fpArg{fpIntArg(o.fd), fpIntArg(int32(offset)), fpIntArg(int32(whence))}))
+		n = int64(n32)
+	}
+	return
+}
+
+// Tell returns the current read or write location of the large object
+// descriptor.
+func (o *LargeObject) Tell() (n int64, err error) {
+	if o.lo.Has64 {
+		n, err = fpInt64(o.lo.fp.CallFn("lo_tell64", []fpArg{fpIntArg(o.fd)}))
+	} else {
+		var n32 int32
+		n32, err = fpInt32(o.lo.fp.CallFn("lo_tell", []fpArg{fpIntArg(o.fd)}))
+		n = int64(n32)
+	}
+	return
+}
+
+// Truncate truncates the large object to size.
+func (o *LargeObject) Truncate(size int64) (err error) {
+	if o.lo.Has64 {
+		_, err = o.lo.fp.CallFn("lo_truncate64", []fpArg{fpIntArg(o.fd), fpInt64Arg(size)})
+	} else {
+		_, err = o.lo.fp.CallFn("lo_truncate", []fpArg{fpIntArg(o.fd), fpIntArg(int32(size))})
+	}
+	return
+}
+
+// Close closes the large object descriptor.
+func (o *LargeObject) Close() error {
+	_, err := o.lo.fp.CallFn("lo_close", []fpArg{fpIntArg(o.fd)})
+	return err
+}
diff --git a/vendor/github.com/jackc/pgx/logger.go b/vendor/github.com/jackc/pgx/logger.go
new file mode 100644
index 0000000..4423325
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/logger.go
@@ -0,0 +1,81 @@
+package pgx
+
+import (
+	"encoding/hex"
+	"errors"
+	"fmt"
+)
+
+// The values for log levels are chosen such that the zero value means that no
+// log level was specified and we can default to LogLevelDebug to preserve
+// the behavior that existed prior to log level introduction.
+const (
+	LogLevelTrace = 6
+	LogLevelDebug = 5
+	LogLevelInfo  = 4
+	LogLevelWarn  = 3
+	LogLevelError = 2
+	LogLevelNone  = 1
+)
+
+// Logger is the interface used to get logging from pgx internals.
+// https://github.com/inconshreveable/log15 is the recommended logging package.
+// This logging interface was extracted from there. However, it should be simple
+// to adapt any logger to this interface.
+type Logger interface { + // Log a message at the given level with context key/value pairs + Debug(msg string, ctx ...interface{}) + Info(msg string, ctx ...interface{}) + Warn(msg string, ctx ...interface{}) + Error(msg string, ctx ...interface{}) +} + +// LogLevelFromString converts log level string to constant +// +// Valid levels: +// trace +// debug +// info +// warn +// error +// none +func LogLevelFromString(s string) (int, error) { + switch s { + case "trace": + return LogLevelTrace, nil + case "debug": + return LogLevelDebug, nil + case "info": + return LogLevelInfo, nil + case "warn": + return LogLevelWarn, nil + case "error": + return LogLevelError, nil + case "none": + return LogLevelNone, nil + default: + return 0, errors.New("invalid log level") + } +} + +func logQueryArgs(args []interface{}) []interface{} { + logArgs := make([]interface{}, 0, len(args)) + + for _, a := range args { + switch v := a.(type) { + case []byte: + if len(v) < 64 { + a = hex.EncodeToString(v) + } else { + a = fmt.Sprintf("%x (truncated %d bytes)", v[:64], len(v)-64) + } + case string: + if len(v) > 64 { + a = fmt.Sprintf("%s (truncated %d bytes)", v[:64], len(v)-64) + } + } + logArgs = append(logArgs, a) + } + + return logArgs +} diff --git a/vendor/github.com/jackc/pgx/messages.go b/vendor/github.com/jackc/pgx/messages.go new file mode 100644 index 0000000..317ba27 --- /dev/null +++ b/vendor/github.com/jackc/pgx/messages.go @@ -0,0 +1,159 @@ +package pgx + +import ( + "encoding/binary" +) + +const ( + protocolVersionNumber = 196608 // 3.0 +) + +const ( + backendKeyData = 'K' + authenticationX = 'R' + readyForQuery = 'Z' + rowDescription = 'T' + dataRow = 'D' + commandComplete = 'C' + errorResponse = 'E' + noticeResponse = 'N' + parseComplete = '1' + parameterDescription = 't' + bindComplete = '2' + notificationResponse = 'A' + emptyQueryResponse = 'I' + noData = 'n' + closeComplete = '3' + flush = 'H' + copyInResponse = 'G' + copyData = 'd' + copyFail = 'f' + copyDone = 'c' +) + +type startupMessage struct { + options map[string]string +} + +func newStartupMessage() *startupMessage { + return &startupMessage{map[string]string{}} +} + +func (s *startupMessage) Bytes() (buf []byte) { + buf = make([]byte, 8, 128) + binary.BigEndian.PutUint32(buf[4:8], uint32(protocolVersionNumber)) + for key, value := range s.options { + buf = append(buf, key...) + buf = append(buf, 0) + buf = append(buf, value...) + buf = append(buf, 0) + } + buf = append(buf, ("\000")...) + binary.BigEndian.PutUint32(buf[0:4], uint32(len(buf))) + return buf +} + +type FieldDescription struct { + Name string + Table Oid + AttributeNumber int16 + DataType Oid + DataTypeSize int16 + DataTypeName string + Modifier int32 + FormatCode int16 +} + +// PgError represents an error reported by the PostgreSQL server. See +// http://www.postgresql.org/docs/9.3/static/protocol-error-fields.html for +// detailed field description. 
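+// For example, a unique constraint violation arrives with Severity "ERROR"
+// and Code "23505" (unique_violation).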
+type PgError struct {
+	Severity         string
+	Code             string
+	Message          string
+	Detail           string
+	Hint             string
+	Position         int32
+	InternalPosition int32
+	InternalQuery    string
+	Where            string
+	SchemaName       string
+	TableName        string
+	ColumnName       string
+	DataTypeName     string
+	ConstraintName   string
+	File             string
+	Line             int32
+	Routine          string
+}
+
+func (pe PgError) Error() string {
+	return pe.Severity + ": " + pe.Message + " (SQLSTATE " + pe.Code + ")"
+}
+
+func newWriteBuf(c *Conn, t byte) *WriteBuf {
+	buf := append(c.wbuf[0:0], t, 0, 0, 0, 0)
+	c.writeBuf = WriteBuf{buf: buf, sizeIdx: 1, conn: c}
+	return &c.writeBuf
+}
+
+// WriteBuf is used to build messages to send to the PostgreSQL server. It is
+// used by the Encoder interface when implementing custom encoders.
+type WriteBuf struct {
+	buf     []byte
+	sizeIdx int
+	conn    *Conn
+}
+
+func (wb *WriteBuf) startMsg(t byte) {
+	wb.closeMsg()
+	wb.buf = append(wb.buf, t, 0, 0, 0, 0)
+	wb.sizeIdx = len(wb.buf) - 4
+}
+
+func (wb *WriteBuf) closeMsg() {
+	binary.BigEndian.PutUint32(wb.buf[wb.sizeIdx:wb.sizeIdx+4], uint32(len(wb.buf)-wb.sizeIdx))
+}
+
+func (wb *WriteBuf) WriteByte(b byte) {
+	wb.buf = append(wb.buf, b)
+}
+
+func (wb *WriteBuf) WriteCString(s string) {
+	wb.buf = append(wb.buf, []byte(s)...)
+	wb.buf = append(wb.buf, 0)
+}
+
+func (wb *WriteBuf) WriteInt16(n int16) {
+	b := make([]byte, 2)
+	binary.BigEndian.PutUint16(b, uint16(n))
+	wb.buf = append(wb.buf, b...)
+}
+
+func (wb *WriteBuf) WriteUint16(n uint16) {
+	b := make([]byte, 2)
+	binary.BigEndian.PutUint16(b, n)
+	wb.buf = append(wb.buf, b...)
+}
+
+func (wb *WriteBuf) WriteInt32(n int32) {
+	b := make([]byte, 4)
+	binary.BigEndian.PutUint32(b, uint32(n))
+	wb.buf = append(wb.buf, b...)
+}
+
+func (wb *WriteBuf) WriteUint32(n uint32) {
+	b := make([]byte, 4)
+	binary.BigEndian.PutUint32(b, n)
+	wb.buf = append(wb.buf, b...)
+}
+
+func (wb *WriteBuf) WriteInt64(n int64) {
+	b := make([]byte, 8)
+	binary.BigEndian.PutUint64(b, uint64(n))
+	wb.buf = append(wb.buf, b...)
+}
+
+func (wb *WriteBuf) WriteBytes(b []byte) {
+	wb.buf = append(wb.buf, b...)
+}
diff --git a/vendor/github.com/jackc/pgx/msg_reader.go b/vendor/github.com/jackc/pgx/msg_reader.go
new file mode 100644
index 0000000..21db5d2
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/msg_reader.go
@@ -0,0 +1,316 @@
+package pgx
+
+import (
+	"bufio"
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+// msgReader is a helper that reads values from a PostgreSQL message.
+type msgReader struct {
+	reader            *bufio.Reader
+	msgBytesRemaining int32
+	err               error
+	log               func(lvl int, msg string, ctx ...interface{})
+	shouldLog         func(lvl int) bool
+}
+
+// Err returns any error that the msgReader has experienced
+func (r *msgReader) Err() error {
+	return r.err
+}
+
+// fatal tells r that a fatal error has occurred
+func (r *msgReader) fatal(err error) {
+	if r.shouldLog(LogLevelTrace) {
+		r.log(LogLevelTrace, "msgReader.fatal", "error", err, "msgBytesRemaining", r.msgBytesRemaining)
+	}
+	r.err = err
+}
+
+// rxMsg reads the type and size of the next message.
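+// Any unread remainder of the previous message is discarded first.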
+func (r *msgReader) rxMsg() (byte, error) { + if r.err != nil { + return 0, r.err + } + + if r.msgBytesRemaining > 0 { + if r.shouldLog(LogLevelTrace) { + r.log(LogLevelTrace, "msgReader.rxMsg discarding unread previous message", "msgBytesRemaining", r.msgBytesRemaining) + } + + _, err := r.reader.Discard(int(r.msgBytesRemaining)) + if err != nil { + return 0, err + } + } + + b, err := r.reader.Peek(5) + if err != nil { + r.fatal(err) + return 0, err + } + msgType := b[0] + r.msgBytesRemaining = int32(binary.BigEndian.Uint32(b[1:])) - 4 + r.reader.Discard(5) + return msgType, nil +} + +func (r *msgReader) readByte() byte { + if r.err != nil { + return 0 + } + + r.msgBytesRemaining-- + if r.msgBytesRemaining < 0 { + r.fatal(errors.New("read past end of message")) + return 0 + } + + b, err := r.reader.ReadByte() + if err != nil { + r.fatal(err) + return 0 + } + + if r.shouldLog(LogLevelTrace) { + r.log(LogLevelTrace, "msgReader.readByte", "value", b, "byteAsString", string(b), "msgBytesRemaining", r.msgBytesRemaining) + } + + return b +} + +func (r *msgReader) readInt16() int16 { + if r.err != nil { + return 0 + } + + r.msgBytesRemaining -= 2 + if r.msgBytesRemaining < 0 { + r.fatal(errors.New("read past end of message")) + return 0 + } + + b, err := r.reader.Peek(2) + if err != nil { + r.fatal(err) + return 0 + } + + n := int16(binary.BigEndian.Uint16(b)) + + r.reader.Discard(2) + + if r.shouldLog(LogLevelTrace) { + r.log(LogLevelTrace, "msgReader.readInt16", "value", n, "msgBytesRemaining", r.msgBytesRemaining) + } + + return n +} + +func (r *msgReader) readInt32() int32 { + if r.err != nil { + return 0 + } + + r.msgBytesRemaining -= 4 + if r.msgBytesRemaining < 0 { + r.fatal(errors.New("read past end of message")) + return 0 + } + + b, err := r.reader.Peek(4) + if err != nil { + r.fatal(err) + return 0 + } + + n := int32(binary.BigEndian.Uint32(b)) + + r.reader.Discard(4) + + if r.shouldLog(LogLevelTrace) { + r.log(LogLevelTrace, "msgReader.readInt32", "value", n, "msgBytesRemaining", r.msgBytesRemaining) + } + + return n +} + +func (r *msgReader) readUint16() uint16 { + if r.err != nil { + return 0 + } + + r.msgBytesRemaining -= 2 + if r.msgBytesRemaining < 0 { + r.fatal(errors.New("read past end of message")) + return 0 + } + + b, err := r.reader.Peek(2) + if err != nil { + r.fatal(err) + return 0 + } + + n := uint16(binary.BigEndian.Uint16(b)) + + r.reader.Discard(2) + + if r.shouldLog(LogLevelTrace) { + r.log(LogLevelTrace, "msgReader.readUint16", "value", n, "msgBytesRemaining", r.msgBytesRemaining) + } + + return n +} + +func (r *msgReader) readUint32() uint32 { + if r.err != nil { + return 0 + } + + r.msgBytesRemaining -= 4 + if r.msgBytesRemaining < 0 { + r.fatal(errors.New("read past end of message")) + return 0 + } + + b, err := r.reader.Peek(4) + if err != nil { + r.fatal(err) + return 0 + } + + n := uint32(binary.BigEndian.Uint32(b)) + + r.reader.Discard(4) + + if r.shouldLog(LogLevelTrace) { + r.log(LogLevelTrace, "msgReader.readUint32", "value", n, "msgBytesRemaining", r.msgBytesRemaining) + } + + return n +} + +func (r *msgReader) readInt64() int64 { + if r.err != nil { + return 0 + } + + r.msgBytesRemaining -= 8 + if r.msgBytesRemaining < 0 { + r.fatal(errors.New("read past end of message")) + return 0 + } + + b, err := r.reader.Peek(8) + if err != nil { + r.fatal(err) + return 0 + } + + n := int64(binary.BigEndian.Uint64(b)) + + r.reader.Discard(8) + + if r.shouldLog(LogLevelTrace) { + r.log(LogLevelTrace, "msgReader.readInt64", "value", n, "msgBytesRemaining", 
r.msgBytesRemaining) + } + + return n +} + +func (r *msgReader) readOid() Oid { + return Oid(r.readInt32()) +} + +// readCString reads a null terminated string +func (r *msgReader) readCString() string { + if r.err != nil { + return "" + } + + b, err := r.reader.ReadBytes(0) + if err != nil { + r.fatal(err) + return "" + } + + r.msgBytesRemaining -= int32(len(b)) + if r.msgBytesRemaining < 0 { + r.fatal(errors.New("read past end of message")) + return "" + } + + s := string(b[0 : len(b)-1]) + + if r.shouldLog(LogLevelTrace) { + r.log(LogLevelTrace, "msgReader.readCString", "value", s, "msgBytesRemaining", r.msgBytesRemaining) + } + + return s +} + +// readString reads count bytes and returns as string +func (r *msgReader) readString(countI32 int32) string { + if r.err != nil { + return "" + } + + r.msgBytesRemaining -= countI32 + if r.msgBytesRemaining < 0 { + r.fatal(errors.New("read past end of message")) + return "" + } + + count := int(countI32) + var s string + + if r.reader.Buffered() >= count { + buf, _ := r.reader.Peek(count) + s = string(buf) + r.reader.Discard(count) + } else { + buf := make([]byte, count) + _, err := io.ReadFull(r.reader, buf) + if err != nil { + r.fatal(err) + return "" + } + s = string(buf) + } + + if r.shouldLog(LogLevelTrace) { + r.log(LogLevelTrace, "msgReader.readString", "value", s, "msgBytesRemaining", r.msgBytesRemaining) + } + + return s +} + +// readBytes reads count bytes and returns as []byte +func (r *msgReader) readBytes(count int32) []byte { + if r.err != nil { + return nil + } + + r.msgBytesRemaining -= count + if r.msgBytesRemaining < 0 { + r.fatal(errors.New("read past end of message")) + return nil + } + + b := make([]byte, int(count)) + + _, err := io.ReadFull(r.reader, b) + if err != nil { + r.fatal(err) + return nil + } + + if r.shouldLog(LogLevelTrace) { + r.log(LogLevelTrace, "msgReader.readBytes", "value", b, "msgBytesRemaining", r.msgBytesRemaining) + } + + return b +} diff --git a/vendor/github.com/jackc/pgx/pgpass.go b/vendor/github.com/jackc/pgx/pgpass.go new file mode 100644 index 0000000..b6f028d --- /dev/null +++ b/vendor/github.com/jackc/pgx/pgpass.go @@ -0,0 +1,85 @@ +package pgx + +import ( + "bufio" + "fmt" + "os" + "os/user" + "path/filepath" + "strings" +) + +func parsepgpass(cfg *ConnConfig, line string) *string { + const ( + backslash = "\r" + colon = "\n" + ) + const ( + host int = iota + port + database + username + pw + ) + line = strings.Replace(line, `\:`, colon, -1) + line = strings.Replace(line, `\\`, backslash, -1) + parts := strings.Split(line, `:`) + if len(parts) != 5 { + return nil + } + for i := range parts { + if parts[i] == `*` { + continue + } + parts[i] = strings.Replace(strings.Replace(parts[i], backslash, `\`, -1), colon, `:`, -1) + switch i { + case host: + if parts[i] != cfg.Host { + return nil + } + case port: + portstr := fmt.Sprintf(`%v`, cfg.Port) + if portstr == "0" { + portstr = "5432" + } + if parts[i] != portstr { + return nil + } + case database: + if parts[i] != cfg.Database { + return nil + } + case username: + if parts[i] != cfg.User { + return nil + } + } + } + return &parts[4] +} + +func pgpass(cfg *ConnConfig) (found bool) { + passfile := os.Getenv("PGPASSFILE") + if passfile == "" { + u, err := user.Current() + if err != nil { + return + } + passfile = filepath.Join(u.HomeDir, ".pgpass") + } + f, err := os.Open(passfile) + if err != nil { + return + } + defer f.Close() + scanner := bufio.NewScanner(f) + var pw *string + for scanner.Scan() { + pw = parsepgpass(cfg, scanner.Text()) + if 
pw != nil { + cfg.Password = *pw + return true + } + } + return false +} diff --git a/vendor/github.com/jackc/pgx/query.go b/vendor/github.com/jackc/pgx/query.go new file mode 100644 index 0000000..19b867e --- /dev/null +++ b/vendor/github.com/jackc/pgx/query.go @@ -0,0 +1,494 @@ +package pgx + +import ( + "database/sql" + "errors" + "fmt" + "time" +) + +// Row is a convenience wrapper over Rows that is returned by QueryRow. +type Row Rows + +// Scan works the same as (*Rows Scan) with the following exceptions. If no +// rows were found it returns ErrNoRows. If multiple rows are returned it +// ignores all but the first. +func (r *Row) Scan(dest ...interface{}) (err error) { + rows := (*Rows)(r) + + if rows.Err() != nil { + return rows.Err() + } + + if !rows.Next() { + if rows.Err() == nil { + return ErrNoRows + } + return rows.Err() + } + + rows.Scan(dest...) + rows.Close() + return rows.Err() +} + +// Rows is the result set returned from *Conn.Query. Rows must be closed before +// the *Conn can be used again. Rows are closed by explicitly calling Close(), +// calling Next() until it returns false, or when a fatal error occurs. +type Rows struct { + conn *Conn + mr *msgReader + fields []FieldDescription + vr ValueReader + rowCount int + columnIdx int + err error + startTime time.Time + sql string + args []interface{} + afterClose func(*Rows) + unlockConn bool + closed bool +} + +func (rows *Rows) FieldDescriptions() []FieldDescription { + return rows.fields +} + +func (rows *Rows) close() { + if rows.closed { + return + } + + if rows.unlockConn { + rows.conn.unlock() + rows.unlockConn = false + } + + rows.closed = true + + if rows.err == nil { + if rows.conn.shouldLog(LogLevelInfo) { + endTime := time.Now() + rows.conn.log(LogLevelInfo, "Query", "sql", rows.sql, "args", logQueryArgs(rows.args), "time", endTime.Sub(rows.startTime), "rowCount", rows.rowCount) + } + } else if rows.conn.shouldLog(LogLevelError) { + rows.conn.log(LogLevelError, "Query", "sql", rows.sql, "args", logQueryArgs(rows.args)) + } + + if rows.afterClose != nil { + rows.afterClose(rows) + } +} + +func (rows *Rows) readUntilReadyForQuery() { + for { + t, r, err := rows.conn.rxMsg() + if err != nil { + rows.close() + return + } + + switch t { + case readyForQuery: + rows.conn.rxReadyForQuery(r) + rows.close() + return + case rowDescription: + case dataRow: + case commandComplete: + case bindComplete: + case errorResponse: + err = rows.conn.rxErrorResponse(r) + if rows.err == nil { + rows.err = err + } + default: + err = rows.conn.processContextFreeMsg(t, r) + if err != nil { + rows.close() + return + } + } + } +} + +// Close closes the rows, making the connection ready for use again. It is safe +// to call Close after rows is already closed. +func (rows *Rows) Close() { + if rows.closed { + return + } + rows.readUntilReadyForQuery() + rows.close() +} + +func (rows *Rows) Err() error { + return rows.err +} + +// abort signals that the query was not successfully sent to the server. +// This differs from Fatal in that it is not necessary to readUntilReadyForQuery +func (rows *Rows) abort(err error) { + if rows.err != nil { + return + } + + rows.err = err + rows.close() +} + +// Fatal signals an error occurred after the query was sent to the server. It +// closes the rows automatically. +func (rows *Rows) Fatal(err error) { + if rows.err != nil { + return + } + + rows.err = err + rows.Close() +} + +// Next prepares the next row for reading. It returns true if there is another +// row and false if no more rows are available. 
It automatically closes rows +// when all rows are read. +func (rows *Rows) Next() bool { + if rows.closed { + return false + } + + rows.rowCount++ + rows.columnIdx = 0 + rows.vr = ValueReader{} + + for { + t, r, err := rows.conn.rxMsg() + if err != nil { + rows.Fatal(err) + return false + } + + switch t { + case readyForQuery: + rows.conn.rxReadyForQuery(r) + rows.close() + return false + case dataRow: + fieldCount := r.readInt16() + if int(fieldCount) != len(rows.fields) { + rows.Fatal(ProtocolError(fmt.Sprintf("Row description field count (%v) and data row field count (%v) do not match", len(rows.fields), fieldCount))) + return false + } + + rows.mr = r + return true + case commandComplete: + case bindComplete: + default: + err = rows.conn.processContextFreeMsg(t, r) + if err != nil { + rows.Fatal(err) + return false + } + } + } +} + +// Conn returns the *Conn this *Rows is using. +func (rows *Rows) Conn() *Conn { + return rows.conn +} + +func (rows *Rows) nextColumn() (*ValueReader, bool) { + if rows.closed { + return nil, false + } + if len(rows.fields) <= rows.columnIdx { + rows.Fatal(ProtocolError("No next column available")) + return nil, false + } + + if rows.vr.Len() > 0 { + rows.mr.readBytes(rows.vr.Len()) + } + + fd := &rows.fields[rows.columnIdx] + rows.columnIdx++ + size := rows.mr.readInt32() + rows.vr = ValueReader{mr: rows.mr, fd: fd, valueBytesRemaining: size} + return &rows.vr, true +} + +type scanArgError struct { + col int + err error +} + +func (e scanArgError) Error() string { + return fmt.Sprintf("can't scan into dest[%d]: %v", e.col, e.err) +} + +// Scan reads the values from the current row into dest values positionally. +// dest can include pointers to core types, values implementing the Scanner +// interface, []byte, and nil. []byte will skip the decoding process and directly +// copy the raw bytes received from PostgreSQL. nil will skip the value entirely. +func (rows *Rows) Scan(dest ...interface{}) (err error) { + if len(rows.fields) != len(dest) { + err = fmt.Errorf("Scan received wrong number of arguments, got %d but expected %d", len(dest), len(rows.fields)) + rows.Fatal(err) + return err + } + + for i, d := range dest { + vr, _ := rows.nextColumn() + + if d == nil { + continue + } + + // Check for []byte first as we allow sidestepping the decoding process and retrieving the raw bytes + if b, ok := d.(*[]byte); ok { + // If it actually is a bytea then pass it through decodeBytea (so it can be decoded if it is in text format) + // Otherwise read the bytes directly regardless of what the actual type is. 
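+			// (A value length of -1 signals SQL NULL, which maps to a nil slice below.)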
+ if vr.Type().DataType == ByteaOid { + *b = decodeBytea(vr) + } else { + if vr.Len() != -1 { + *b = vr.ReadBytes(vr.Len()) + } else { + *b = nil + } + } + } else if s, ok := d.(Scanner); ok { + err = s.Scan(vr) + if err != nil { + rows.Fatal(scanArgError{col: i, err: err}) + } + } else if s, ok := d.(PgxScanner); ok { + err = s.ScanPgx(vr) + if err != nil { + rows.Fatal(scanArgError{col: i, err: err}) + } + } else if s, ok := d.(sql.Scanner); ok { + var val interface{} + if 0 <= vr.Len() { + switch vr.Type().DataType { + case BoolOid: + val = decodeBool(vr) + case Int8Oid: + val = int64(decodeInt8(vr)) + case Int2Oid: + val = int64(decodeInt2(vr)) + case Int4Oid: + val = int64(decodeInt4(vr)) + case TextOid, VarcharOid: + val = decodeText(vr) + case OidOid: + val = int64(decodeOid(vr)) + case Float4Oid: + val = float64(decodeFloat4(vr)) + case Float8Oid: + val = decodeFloat8(vr) + case DateOid: + val = decodeDate(vr) + case TimestampOid: + val = decodeTimestamp(vr) + case TimestampTzOid: + val = decodeTimestampTz(vr) + default: + val = vr.ReadBytes(vr.Len()) + } + } + err = s.Scan(val) + if err != nil { + rows.Fatal(scanArgError{col: i, err: err}) + } + } else if vr.Type().DataType == JsonOid { + // Because the argument passed to decodeJSON will escape the heap. + // This allows d to be stack allocated and only copied to the heap when + // we actually are decoding JSON. This saves one memory allocation per + // row. + d2 := d + decodeJSON(vr, &d2) + } else if vr.Type().DataType == JsonbOid { + // Same trick as above for getting stack allocation + d2 := d + decodeJSONB(vr, &d2) + } else { + if err := Decode(vr, d); err != nil { + rows.Fatal(scanArgError{col: i, err: err}) + } + } + if vr.Err() != nil { + rows.Fatal(scanArgError{col: i, err: vr.Err()}) + } + + if rows.Err() != nil { + return rows.Err() + } + } + + return nil +} + +// Values returns an array of the row values +func (rows *Rows) Values() ([]interface{}, error) { + if rows.closed { + return nil, errors.New("rows is closed") + } + + values := make([]interface{}, 0, len(rows.fields)) + + for range rows.fields { + vr, _ := rows.nextColumn() + + if vr.Len() == -1 { + values = append(values, nil) + continue + } + + switch vr.Type().FormatCode { + // All intrinsic types (except string) are encoded with binary + // encoding so anything else should be treated as a string + case TextFormatCode: + values = append(values, vr.ReadString(vr.Len())) + case BinaryFormatCode: + switch vr.Type().DataType { + case TextOid, VarcharOid: + values = append(values, decodeText(vr)) + case BoolOid: + values = append(values, decodeBool(vr)) + case ByteaOid: + values = append(values, decodeBytea(vr)) + case Int8Oid: + values = append(values, decodeInt8(vr)) + case Int2Oid: + values = append(values, decodeInt2(vr)) + case Int4Oid: + values = append(values, decodeInt4(vr)) + case OidOid: + values = append(values, decodeOid(vr)) + case Float4Oid: + values = append(values, decodeFloat4(vr)) + case Float8Oid: + values = append(values, decodeFloat8(vr)) + case BoolArrayOid: + values = append(values, decodeBoolArray(vr)) + case Int2ArrayOid: + values = append(values, decodeInt2Array(vr)) + case Int4ArrayOid: + values = append(values, decodeInt4Array(vr)) + case Int8ArrayOid: + values = append(values, decodeInt8Array(vr)) + case Float4ArrayOid: + values = append(values, decodeFloat4Array(vr)) + case Float8ArrayOid: + values = append(values, decodeFloat8Array(vr)) + case TextArrayOid, VarcharArrayOid: + values = append(values, decodeTextArray(vr)) + case 
TimestampArrayOid, TimestampTzArrayOid:
+				values = append(values, decodeTimestampArray(vr))
+			case DateOid:
+				values = append(values, decodeDate(vr))
+			case TimestampTzOid:
+				values = append(values, decodeTimestampTz(vr))
+			case TimestampOid:
+				values = append(values, decodeTimestamp(vr))
+			case InetOid, CidrOid:
+				values = append(values, decodeInet(vr))
+			case JsonOid:
+				var d interface{}
+				decodeJSON(vr, &d)
+				values = append(values, d)
+			case JsonbOid:
+				var d interface{}
+				decodeJSONB(vr, &d)
+				values = append(values, d)
+			default:
+				rows.Fatal(errors.New("Values cannot handle binary format non-intrinsic types"))
+			}
+		default:
+			rows.Fatal(errors.New("Unknown format code"))
+		}
+
+		if vr.Err() != nil {
+			rows.Fatal(vr.Err())
+		}
+
+		if rows.Err() != nil {
+			return nil, rows.Err()
+		}
+	}
+
+	return values, rows.Err()
+}
+
+// AfterClose adds f to a LIFO queue of functions that will be called when
+// rows is closed.
+func (rows *Rows) AfterClose(f func(*Rows)) {
+	if rows.afterClose == nil {
+		rows.afterClose = f
+	} else {
+		prevFn := rows.afterClose
+		rows.afterClose = func(rows *Rows) {
+			f(rows)
+			prevFn(rows)
+		}
+	}
+}
+
+// Query executes sql with args. If there is an error the returned *Rows will
+// be returned in an error state. So it is allowed to ignore the error returned
+// from Query and handle it in *Rows.
+func (c *Conn) Query(sql string, args ...interface{}) (*Rows, error) {
+	c.lastActivityTime = time.Now()
+
+	rows := c.getRows(sql, args)
+
+	if err := c.lock(); err != nil {
+		rows.abort(err)
+		return rows, err
+	}
+	rows.unlockConn = true
+
+	ps, ok := c.preparedStatements[sql]
+	if !ok {
+		var err error
+		ps, err = c.Prepare("", sql)
+		if err != nil {
+			rows.abort(err)
+			return rows, rows.err
+		}
+	}
+	rows.sql = ps.SQL
+	rows.fields = ps.FieldDescriptions
+	err := c.sendPreparedQuery(ps, args...)
+	if err != nil {
+		rows.abort(err)
+	}
+	return rows, rows.err
+}
+
+func (c *Conn) getRows(sql string, args []interface{}) *Rows {
+	if len(c.preallocatedRows) == 0 {
+		c.preallocatedRows = make([]Rows, 64)
+	}
+
+	r := &c.preallocatedRows[len(c.preallocatedRows)-1]
+	c.preallocatedRows = c.preallocatedRows[0 : len(c.preallocatedRows)-1]
+
+	r.conn = c
+	r.startTime = c.lastActivityTime
+	r.sql = sql
+	r.args = args
+
+	return r
+}
+
+// QueryRow is a convenience wrapper over Query. Any error that occurs while
+// querying is deferred until calling Scan on the returned *Row. That *Row will
+// error with ErrNoRows if no rows are returned.
+func (c *Conn) QueryRow(sql string, args ...interface{}) *Row {
+	rows, _ := c.Query(sql, args...)
+	return (*Row)(rows)
+}
diff --git a/vendor/github.com/jackc/pgx/replication.go b/vendor/github.com/jackc/pgx/replication.go
new file mode 100644
index 0000000..7b28d6b
--- /dev/null
+++ b/vendor/github.com/jackc/pgx/replication.go
@@ -0,0 +1,429 @@
+package pgx
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"time"
+)
+
+const (
+	copyBothResponse                  = 'W'
+	walData                           = 'w'
+	senderKeepalive                   = 'k'
+	standbyStatusUpdate               = 'r'
+	initialReplicationResponseTimeout = 5 * time.Second
+)
+
+var epochNano int64
+
+func init() {
+	epochNano = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC).UnixNano()
+}
+
+// FormatLSN formats the given 64bit LSN value into the XXX/XXX format,
+// which is the format reported by postgres.
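+// For example, FormatLSN(0x2000000) returns "0/2000000".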
+func FormatLSN(lsn uint64) string {
+	return fmt.Sprintf("%X/%X", uint32(lsn>>32), uint32(lsn))
+}
+
+// ParseLSN parses the given XXX/XXX format LSN as reported by postgres
+// into a 64 bit integer as used internally by the wire protocols.
+func ParseLSN(lsn string) (outputLsn uint64, err error) {
+	var upperHalf uint64
+	var lowerHalf uint64
+	var nparsed int
+	nparsed, err = fmt.Sscanf(lsn, "%X/%X", &upperHalf, &lowerHalf)
+	if err != nil {
+		return
+	}
+
+	if nparsed != 2 {
+		err = fmt.Errorf("Failed to parse LSN: %s", lsn)
+		return
+	}
+
+	outputLsn = (upperHalf << 32) + lowerHalf
+	return
+}
+
+// WalMessage contains WAL payload entry data.
+type WalMessage struct {
+	// The WAL start position of this data. This
+	// is the WAL position we need to track.
+	WalStart uint64
+	// The server wal end and server time are
+	// documented to track the end position and current
+	// time of the server, both of which appear to be
+	// unimplemented in pg 9.5.
+	ServerWalEnd uint64
+	ServerTime   uint64
+	// The WAL data is the raw unparsed binary WAL entry.
+	// The contents of this are determined by the output
+	// logical encoding plugin.
+	WalData []byte
+}
+
+func (w *WalMessage) Time() time.Time {
+	return time.Unix(0, (int64(w.ServerTime)*1000)+epochNano)
+}
+
+func (w *WalMessage) ByteLag() uint64 {
+	return (w.ServerWalEnd - w.WalStart)
+}
+
+func (w *WalMessage) String() string {
+	return fmt.Sprintf("Wal: %s Time: %s Lag: %d", FormatLSN(w.WalStart), w.Time(), w.ByteLag())
+}
+
+// ServerHeartbeat is sent periodically from the server. It carries
+// server status and a reply request field.
+type ServerHeartbeat struct {
+	// The current max wal position on the server,
+	// used for lag tracking
+	ServerWalEnd uint64
+	// The server time, in microseconds since jan 1 2000
+	ServerTime uint64
+	// If 1, the server is requesting a standby status message
+	// to be sent immediately.
+	ReplyRequested byte
+}
+
+func (s *ServerHeartbeat) Time() time.Time {
+	return time.Unix(0, (int64(s.ServerTime)*1000)+epochNano)
+}
+
+func (s *ServerHeartbeat) String() string {
+	return fmt.Sprintf("WalEnd: %s ReplyRequested: %d T: %s", FormatLSN(s.ServerWalEnd), s.ReplyRequested, s.Time())
+}
+
+// ReplicationMessage wraps all possible messages received from the
+// server during replication. At most one of the wal message
+// or server heartbeat will be non-nil.
+type ReplicationMessage struct {
+	WalMessage      *WalMessage
+	ServerHeartbeat *ServerHeartbeat
+}
+
+// StandbyStatus is the client side heartbeat sent to the postgresql
+// server to track the client wal positions. For practical purposes,
+// all wal positions are typically set to the same value.
+type StandbyStatus struct {
+	// The WAL position that's been locally written
+	WalWritePosition uint64
+	// The WAL position that's been locally flushed
+	WalFlushPosition uint64
+	// The WAL position that's been locally applied
+	WalApplyPosition uint64
+	// The client time in microseconds since jan 1 2000
+	ClientTime uint64
+	// If 1, requests the server to immediately send a
+	// server heartbeat
+	ReplyRequested byte
+}
+
+// NewStandbyStatus creates a standby status struct, setting all the WAL
+// positions to the given wal position and the client time to the current time.
+// The wal positions are, in order:
+// WalFlushPosition
+// WalApplyPosition
+// WalWritePosition
+//
+// If only one position is provided, it will be used as the value for all 3
+// status fields.
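+// For example, NewStandbyStatus(lsn) reports lsn as written, flushed, and applied.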
Note you must provide either 1 wal position, or all 3 +// in order to initialize the standby status. +func NewStandbyStatus(walPositions ...uint64) (status *StandbyStatus, err error) { + if len(walPositions) == 1 { + status = new(StandbyStatus) + status.WalFlushPosition = walPositions[0] + status.WalApplyPosition = walPositions[0] + status.WalWritePosition = walPositions[0] + } else if len(walPositions) == 3 { + status = new(StandbyStatus) + status.WalFlushPosition = walPositions[0] + status.WalApplyPosition = walPositions[1] + status.WalWritePosition = walPositions[2] + } else { + err = errors.New(fmt.Sprintf("Invalid number of wal positions provided, need 1 or 3, got %d", len(walPositions))) + return + } + status.ClientTime = uint64((time.Now().UnixNano() - epochNano) / 1000) + return +} + +func ReplicationConnect(config ConnConfig) (r *ReplicationConn, err error) { + if config.RuntimeParams == nil { + config.RuntimeParams = make(map[string]string) + } + config.RuntimeParams["replication"] = "database" + + c, err := Connect(config) + if err != nil { + return + } + return &ReplicationConn{c: c}, nil +} + +type ReplicationConn struct { + c *Conn +} + +// Send standby status to the server, which both acts as a keepalive +// message to the server, as well as carries the WAL position of the +// client, which then updates the server's replication slot position. +func (rc *ReplicationConn) SendStandbyStatus(k *StandbyStatus) (err error) { + writeBuf := newWriteBuf(rc.c, copyData) + writeBuf.WriteByte(standbyStatusUpdate) + writeBuf.WriteInt64(int64(k.WalWritePosition)) + writeBuf.WriteInt64(int64(k.WalFlushPosition)) + writeBuf.WriteInt64(int64(k.WalApplyPosition)) + writeBuf.WriteInt64(int64(k.ClientTime)) + writeBuf.WriteByte(k.ReplyRequested) + + writeBuf.closeMsg() + + _, err = rc.c.conn.Write(writeBuf.buf) + if err != nil { + rc.c.die(err) + } + + return +} + +func (rc *ReplicationConn) Close() error { + return rc.c.Close() +} + +func (rc *ReplicationConn) IsAlive() bool { + return rc.c.IsAlive() +} + +func (rc *ReplicationConn) CauseOfDeath() error { + return rc.c.CauseOfDeath() +} + +func (rc *ReplicationConn) readReplicationMessage() (r *ReplicationMessage, err error) { + var t byte + var reader *msgReader + t, reader, err = rc.c.rxMsg() + if err != nil { + return + } + + switch t { + case noticeResponse: + pgError := rc.c.rxErrorResponse(reader) + if rc.c.shouldLog(LogLevelInfo) { + rc.c.log(LogLevelInfo, pgError.Error()) + } + case errorResponse: + err = rc.c.rxErrorResponse(reader) + if rc.c.shouldLog(LogLevelError) { + rc.c.log(LogLevelError, err.Error()) + } + return + case copyBothResponse: + // This is the tail end of the replication process start, + // and can be safely ignored + return + case copyData: + var msgType byte + msgType = reader.readByte() + switch msgType { + case walData: + walStart := reader.readInt64() + serverWalEnd := reader.readInt64() + serverTime := reader.readInt64() + walData := reader.readBytes(reader.msgBytesRemaining) + walMessage := WalMessage{WalStart: uint64(walStart), + ServerWalEnd: uint64(serverWalEnd), + ServerTime: uint64(serverTime), + WalData: walData, + } + + return &ReplicationMessage{WalMessage: &walMessage}, nil + case senderKeepalive: + serverWalEnd := reader.readInt64() + serverTime := reader.readInt64() + replyNow := reader.readByte() + h := &ServerHeartbeat{ServerWalEnd: uint64(serverWalEnd), ServerTime: uint64(serverTime), ReplyRequested: replyNow} + return &ReplicationMessage{ServerHeartbeat: h}, nil + default: + if 
rc.c.shouldLog(LogLevelError) { + rc.c.log(LogLevelError, "Unexpected data payload message type %v", msgType) + } + } + default: + if rc.c.shouldLog(LogLevelError) { + rc.c.log(LogLevelError, "Unexpected replication message type %v", t) + } + } + return +} + +// Wait for a single replication message up to timeout time. +// +// Properly using this requires some knowledge of the postgres replication mechanisms, +// as the client can receive both WAL data (the ultimate payload) and server heartbeat +// updates. The caller also must send standby status updates in order to keep the connection +// alive and working. +// +// This returns pgx.ErrNotificationTimeout when there is no replication message by the specified +// duration. +func (rc *ReplicationConn) WaitForReplicationMessage(timeout time.Duration) (r *ReplicationMessage, err error) { + var zeroTime time.Time + + deadline := time.Now().Add(timeout) + + // Use SetReadDeadline to implement the timeout. SetReadDeadline will + // cause operations to fail with a *net.OpError that has a Timeout() + // of true. Because the normal pgx rxMsg path considers any error to + // have potentially corrupted the state of the connection, it dies + // on any errors. So to avoid timeout errors in rxMsg we set the + // deadline and peek into the reader. If a timeout error occurs there + // we don't break the pgx connection. If the Peek returns that data + // is available then we turn off the read deadline before the rxMsg. + err = rc.c.conn.SetReadDeadline(deadline) + if err != nil { + return nil, err + } + + // Wait until there is a byte available before continuing onto the normal msg reading path + _, err = rc.c.reader.Peek(1) + if err != nil { + rc.c.conn.SetReadDeadline(zeroTime) // we can only return one error and we already have one -- so ignore possible error from SetReadDeadline + if err, ok := err.(*net.OpError); ok && err.Timeout() { + return nil, ErrNotificationTimeout + } + return nil, err + } + + err = rc.c.conn.SetReadDeadline(zeroTime) + if err != nil { + return nil, err + } + + return rc.readReplicationMessage() +} + +func (rc *ReplicationConn) sendReplicationModeQuery(sql string) (*Rows, error) { + rc.c.lastActivityTime = time.Now() + + rows := rc.c.getRows(sql, nil) + + if err := rc.c.lock(); err != nil { + rows.abort(err) + return rows, err + } + rows.unlockConn = true + + err := rc.c.sendSimpleQuery(sql) + if err != nil { + rows.abort(err) + } + + var t byte + var r *msgReader + t, r, err = rc.c.rxMsg() + if err != nil { + return nil, err + } + + switch t { + case rowDescription: + rows.fields = rc.c.rxRowDescription(r) + // We don't have c.PgTypes here because we're a replication + // connection. This means the field descriptions will have + // only Oids. Not much we can do about this. + default: + if e := rc.c.processContextFreeMsg(t, r); e != nil { + rows.abort(e) + return rows, e + } + } + + return rows, rows.err +} + +// Execute the "IDENTIFY_SYSTEM" command as documented here: +// https://www.postgresql.org/docs/9.5/static/protocol-replication.html +// +// This will return (if successful) a result set that has a single row +// that contains the systemid, current timeline, xlogpos and database +// name.
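+// +// A minimal sketch of consuming that row (rc is an assumed connected +// *ReplicationConn; error handling is elided): +// +//   rows, err := rc.IdentifySystem() +//   if err == nil { +//       defer rows.Close() +//       for rows.Next() { +//           values, _ := rows.Values() +//           fmt.Println(values) +//       } +//   }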
+// +// NOTE: Because this is a replication mode connection, we don't have +// type names, so the field descriptions in the result will have only +// Oids and no DataTypeName values +func (rc *ReplicationConn) IdentifySystem() (r *Rows, err error) { + return rc.sendReplicationModeQuery("IDENTIFY_SYSTEM") +} + +// Execute the "TIMELINE_HISTORY" command as documented here: +// https://www.postgresql.org/docs/9.5/static/protocol-replication.html +// +// This will return (if successful) a result set that has a single row +// that contains the filename of the history file and the content +// of the history file. If called for timeline 1, typically this will +// generate an error that the timeline history file does not exist. +// +// NOTE: Because this is a replication mode connection, we don't have +// type names, so the field descriptions in the result will have only +// Oids and no DataTypeName values +func (rc *ReplicationConn) TimelineHistory(timeline int) (r *Rows, err error) { + return rc.sendReplicationModeQuery(fmt.Sprintf("TIMELINE_HISTORY %d", timeline)) +} + +// Start a replication connection, sending WAL data to the given replication +// receiver. This function wraps a START_REPLICATION command as documented +// here: +// https://www.postgresql.org/docs/9.5/static/protocol-replication.html +// +// Once started, the client needs to invoke WaitForReplicationMessage() in order +// to fetch the WAL and standby status. Also, it is the responsibility of the caller +// to periodically send StandbyStatus messages to update the replication slot position. +// +// This function assumes that slotName has already been created. In order to omit the +// timeline argument, pass a -1 for the timeline to get the server default behavior. +func (rc *ReplicationConn) StartReplication(slotName string, startLsn uint64, timeline int64, pluginArguments ...string) (err error) { + var queryString string + if timeline >= 0 { + queryString = fmt.Sprintf("START_REPLICATION SLOT %s LOGICAL %s TIMELINE %d", slotName, FormatLSN(startLsn), timeline) + } else { + queryString = fmt.Sprintf("START_REPLICATION SLOT %s LOGICAL %s", slotName, FormatLSN(startLsn)) + } + + for _, arg := range pluginArguments { + queryString += fmt.Sprintf(" %s", arg) + } + + if err = rc.c.sendQuery(queryString); err != nil { + return + } + + // The first replication message that comes back here will be (in a success case) + // an empty CopyBoth that is (apparently) sent as the confirmation that the replication has + // started. This call will either return nil, nil, or an error that indicates + // the start replication command failed. + var r *ReplicationMessage + r, err = rc.WaitForReplicationMessage(initialReplicationResponseTimeout) + if err != nil && r != nil { + if rc.c.shouldLog(LogLevelError) { + rc.c.log(LogLevelError, "Unexpected replication message %v", r) + } + } + + return +} + +// Create the replication slot, using the given name and output plugin.
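+// +// A rough end-to-end sketch tying the replication calls together (the +// slot and plugin names are examples only; error handling is elided): +// +//   err := rc.CreateReplicationSlot("example_slot", "test_decoding") +//   err = rc.StartReplication("example_slot", 0, -1) +//   for { +//       msg, err := rc.WaitForReplicationMessage(10 * time.Second) +//       if err == ErrNotificationTimeout { +//           continue +//       } +//       if msg != nil && msg.WalMessage != nil { +//           // process msg.WalMessage.WalData, then acknowledge it: +//           status, _ := NewStandbyStatus(msg.WalMessage.WalStart) +//           rc.SendStandbyStatus(status) +//       } +//   }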
+func (rc *ReplicationConn) CreateReplicationSlot(slotName, outputPlugin string) (err error) { + _, err = rc.c.Exec(fmt.Sprintf("CREATE_REPLICATION_SLOT %s LOGICAL %s", slotName, outputPlugin)) + return +} + +// Drop the replication slot for the given name +func (rc *ReplicationConn) DropReplicationSlot(slotName string) (err error) { + _, err = rc.c.Exec(fmt.Sprintf("DROP_REPLICATION_SLOT %s", slotName)) + return +} diff --git a/vendor/github.com/jackc/pgx/sql.go b/vendor/github.com/jackc/pgx/sql.go new file mode 100644 index 0000000..7ee0f2a --- /dev/null +++ b/vendor/github.com/jackc/pgx/sql.go @@ -0,0 +1,29 @@ +package pgx + +import ( + "strconv" +) + +// QueryArgs is a container for arguments to an SQL query. It is helpful when +// building SQL statements where the number of arguments is variable. +type QueryArgs []interface{} + +var placeholders []string + +func init() { + placeholders = make([]string, 64) + + for i := 1; i < 64; i++ { + placeholders[i] = "$" + strconv.Itoa(i) + } +} + +// Append adds a value to qa and returns the placeholder value for the +// argument. e.g. $1, $2, etc. +func (qa *QueryArgs) Append(v interface{}) string { + *qa = append(*qa, v) + if len(*qa) < len(placeholders) { + return placeholders[len(*qa)] + } + return "$" + strconv.Itoa(len(*qa)) +} diff --git a/vendor/github.com/jackc/pgx/tx.go b/vendor/github.com/jackc/pgx/tx.go new file mode 100644 index 0000000..deb6c01 --- /dev/null +++ b/vendor/github.com/jackc/pgx/tx.go @@ -0,0 +1,207 @@ +package pgx + +import ( + "errors" + "fmt" +) + +// Transaction isolation levels +const ( + Serializable = "serializable" + RepeatableRead = "repeatable read" + ReadCommitted = "read committed" + ReadUncommitted = "read uncommitted" +) + +const ( + TxStatusInProgress = 0 + TxStatusCommitFailure = -1 + TxStatusRollbackFailure = -2 + TxStatusCommitSuccess = 1 + TxStatusRollbackSuccess = 2 +) + +var ErrTxClosed = errors.New("tx is closed") + +// ErrTxCommitRollback occurs when an error has occurred in a transaction and +// Commit() is called. PostgreSQL accepts COMMIT on aborted transactions, but +// it is treated as ROLLBACK. +var ErrTxCommitRollback = errors.New("commit unexpectedly resulted in rollback") + +// Begin starts a transaction with the default isolation level for the current +// connection. To use a specific isolation level see BeginIso. +func (c *Conn) Begin() (*Tx, error) { + return c.begin("") +} + +// BeginIso starts a transaction with isoLevel as the transaction isolation +// level. +// +// Valid isolation levels (and their constants) are: +// serializable (pgx.Serializable) +// repeatable read (pgx.RepeatableRead) +// read committed (pgx.ReadCommitted) +// read uncommitted (pgx.ReadUncommitted) +func (c *Conn) BeginIso(isoLevel string) (*Tx, error) { + return c.begin(isoLevel) +} + +func (c *Conn) begin(isoLevel string) (*Tx, error) { + var beginSQL string + if isoLevel == "" { + beginSQL = "begin" + } else { + beginSQL = fmt.Sprintf("begin isolation level %s", isoLevel) + } + + _, err := c.Exec(beginSQL) + if err != nil { + return nil, err + } + + return &Tx{conn: c}, nil +} + +// Tx represents a database transaction. +// +// All Tx methods return ErrTxClosed if Commit or Rollback has already been +// called on the Tx. 
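+// +// A typical lifecycle, sketched (conn is an assumed *Conn and the table +// is illustrative; error handling is abbreviated): +// +//   tx, err := conn.Begin() +//   if err != nil { +//       return err +//   } +//   defer tx.Rollback() // safe: returns ErrTxClosed after a successful Commit +//   if _, err := tx.Exec("insert into widgets(name) values($1)", "a"); err != nil { +//       return err +//   } +//   return tx.Commit()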
+type Tx struct { + conn *Conn + afterClose func(*Tx) + err error + status int8 +} + +// Commit commits the transaction +func (tx *Tx) Commit() error { + if tx.status != TxStatusInProgress { + return ErrTxClosed + } + + commandTag, err := tx.conn.Exec("commit") + if err == nil && commandTag == "COMMIT" { + tx.status = TxStatusCommitSuccess + } else if err == nil && commandTag == "ROLLBACK" { + tx.status = TxStatusCommitFailure + tx.err = ErrTxCommitRollback + } else { + tx.status = TxStatusCommitFailure + tx.err = err + } + + if tx.afterClose != nil { + tx.afterClose(tx) + } + return tx.err +} + +// Rollback rolls back the transaction. Rollback will return ErrTxClosed if the +// Tx is already closed, but is otherwise safe to call multiple times. Hence, a +// defer tx.Rollback() is safe even if tx.Commit() will be called first in a +// non-error condition. +func (tx *Tx) Rollback() error { + if tx.status != TxStatusInProgress { + return ErrTxClosed + } + + _, tx.err = tx.conn.Exec("rollback") + if tx.err == nil { + tx.status = TxStatusRollbackSuccess + } else { + tx.status = TxStatusRollbackFailure + } + + if tx.afterClose != nil { + tx.afterClose(tx) + } + return tx.err +} + +// Exec delegates to the underlying *Conn +func (tx *Tx) Exec(sql string, arguments ...interface{}) (commandTag CommandTag, err error) { + if tx.status != TxStatusInProgress { + return CommandTag(""), ErrTxClosed + } + + return tx.conn.Exec(sql, arguments...) +} + +// Prepare delegates to the underlying *Conn +func (tx *Tx) Prepare(name, sql string) (*PreparedStatement, error) { + return tx.PrepareEx(name, sql, nil) +} + +// PrepareEx delegates to the underlying *Conn +func (tx *Tx) PrepareEx(name, sql string, opts *PrepareExOptions) (*PreparedStatement, error) { + if tx.status != TxStatusInProgress { + return nil, ErrTxClosed + } + + return tx.conn.PrepareEx(name, sql, opts) +} + +// Query delegates to the underlying *Conn +func (tx *Tx) Query(sql string, args ...interface{}) (*Rows, error) { + if tx.status != TxStatusInProgress { + // Because checking for errors can be deferred to the *Rows, build one with the error + err := ErrTxClosed + return &Rows{closed: true, err: err}, err + } + + return tx.conn.Query(sql, args...) +} + +// QueryRow delegates to the underlying *Conn +func (tx *Tx) QueryRow(sql string, args ...interface{}) *Row { + rows, _ := tx.Query(sql, args...) + return (*Row)(rows) +} + +// Deprecated. Use CopyFrom instead. CopyTo delegates to the underlying *Conn +func (tx *Tx) CopyTo(tableName string, columnNames []string, rowSrc CopyToSource) (int, error) { + if tx.status != TxStatusInProgress { + return 0, ErrTxClosed + } + + return tx.conn.CopyTo(tableName, columnNames, rowSrc) +} + +// CopyFrom delegates to the underlying *Conn +func (tx *Tx) CopyFrom(tableName Identifier, columnNames []string, rowSrc CopyFromSource) (int, error) { + if tx.status != TxStatusInProgress { + return 0, ErrTxClosed + } + + return tx.conn.CopyFrom(tableName, columnNames, rowSrc) +} + +// Conn returns the *Conn this transaction is using. +func (tx *Tx) Conn() *Conn { + return tx.conn +} + +// Status returns the status of the transaction from the set of +// pgx.TxStatus* constants. +func (tx *Tx) Status() int8 { + return tx.status +} + +// Err returns the final error state, if any, of calling Commit or Rollback. +func (tx *Tx) Err() error { + return tx.err +} + +// AfterClose adds f to a LIFO queue of functions that will be called when +// the transaction is closed (either Commit or Rollback).
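+// +// For example, after: +// +//   tx.AfterClose(f1) +//   tx.AfterClose(f2) +// +// f2 runs before f1 when the transaction closes (f1 and f2 being any +// func(*Tx) values).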
+func (tx *Tx) AfterClose(f func(*Tx)) { + if tx.afterClose == nil { + tx.afterClose = f + } else { + prevFn := tx.afterClose + tx.afterClose = func(tx *Tx) { + f(tx) + prevFn(tx) + } + } +} diff --git a/vendor/github.com/jackc/pgx/value_reader.go b/vendor/github.com/jackc/pgx/value_reader.go new file mode 100644 index 0000000..a489754 --- /dev/null +++ b/vendor/github.com/jackc/pgx/value_reader.go @@ -0,0 +1,156 @@ +package pgx + +import ( + "errors" +) + +// ValueReader is used by the Scanner interface to decode values. +type ValueReader struct { + mr *msgReader + fd *FieldDescription + valueBytesRemaining int32 + err error +} + +// Err returns any error that the ValueReader has experienced +func (r *ValueReader) Err() error { + return r.err +} + +// Fatal tells r that a Fatal error has occurred +func (r *ValueReader) Fatal(err error) { + r.err = err +} + +// Len returns the number of unread bytes +func (r *ValueReader) Len() int32 { + return r.valueBytesRemaining +} + +// Type returns the *FieldDescription of the value +func (r *ValueReader) Type() *FieldDescription { + return r.fd +} + +func (r *ValueReader) ReadByte() byte { + if r.err != nil { + return 0 + } + + r.valueBytesRemaining-- + if r.valueBytesRemaining < 0 { + r.Fatal(errors.New("read past end of value")) + return 0 + } + + return r.mr.readByte() +} + +func (r *ValueReader) ReadInt16() int16 { + if r.err != nil { + return 0 + } + + r.valueBytesRemaining -= 2 + if r.valueBytesRemaining < 0 { + r.Fatal(errors.New("read past end of value")) + return 0 + } + + return r.mr.readInt16() +} + +func (r *ValueReader) ReadUint16() uint16 { + if r.err != nil { + return 0 + } + + r.valueBytesRemaining -= 2 + if r.valueBytesRemaining < 0 { + r.Fatal(errors.New("read past end of value")) + return 0 + } + + return r.mr.readUint16() +} + +func (r *ValueReader) ReadInt32() int32 { + if r.err != nil { + return 0 + } + + r.valueBytesRemaining -= 4 + if r.valueBytesRemaining < 0 { + r.Fatal(errors.New("read past end of value")) + return 0 + } + + return r.mr.readInt32() +} + +func (r *ValueReader) ReadUint32() uint32 { + if r.err != nil { + return 0 + } + + r.valueBytesRemaining -= 4 + if r.valueBytesRemaining < 0 { + r.Fatal(errors.New("read past end of value")) + return 0 + } + + return r.mr.readUint32() +} + +func (r *ValueReader) ReadInt64() int64 { + if r.err != nil { + return 0 + } + + r.valueBytesRemaining -= 8 + if r.valueBytesRemaining < 0 { + r.Fatal(errors.New("read past end of value")) + return 0 + } + + return r.mr.readInt64() +} + +func (r *ValueReader) ReadOid() Oid { + return Oid(r.ReadUint32()) +} + +// ReadString reads count bytes and returns as string +func (r *ValueReader) ReadString(count int32) string { + if r.err != nil { + return "" + } + + r.valueBytesRemaining -= count + if r.valueBytesRemaining < 0 { + r.Fatal(errors.New("read past end of value")) + return "" + } + + return r.mr.readString(count) +} + +// ReadBytes reads count bytes and returns as []byte +func (r *ValueReader) ReadBytes(count int32) []byte { + if r.err != nil { + return nil + } + + if count < 0 { + r.Fatal(errors.New("count must not be negative")) + return nil + } + + r.valueBytesRemaining -= count + if r.valueBytesRemaining < 0 { + r.Fatal(errors.New("read past end of value")) + return nil + } + + return r.mr.readBytes(count) +} diff --git a/vendor/github.com/jackc/pgx/values.go b/vendor/github.com/jackc/pgx/values.go new file mode 100644 index 0000000..a189e18 --- /dev/null +++ b/vendor/github.com/jackc/pgx/values.go @@ -0,0 +1,3439 @@ +package pgx 
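+ +// The Null* and helper types in this file are used both as query arguments +// and as Scan destinations. A short sketch (conn is an assumed *Conn and +// the query is illustrative): +// +//   var name NullString +//   err := conn.QueryRow("select name from widgets where id=$1", 1).Scan(&name) +//   if err == nil && name.Valid { +//       fmt.Println(name.String) +//   }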
+ +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "fmt" + "io" + "math" + "net" + "reflect" + "regexp" + "strconv" + "strings" + "time" +) + +// PostgreSQL oids for common types +const ( + BoolOid = 16 + ByteaOid = 17 + CharOid = 18 + NameOid = 19 + Int8Oid = 20 + Int2Oid = 21 + Int4Oid = 23 + TextOid = 25 + OidOid = 26 + TidOid = 27 + XidOid = 28 + CidOid = 29 + JsonOid = 114 + CidrOid = 650 + CidrArrayOid = 651 + Float4Oid = 700 + Float8Oid = 701 + UnknownOid = 705 + InetOid = 869 + BoolArrayOid = 1000 + Int2ArrayOid = 1005 + Int4ArrayOid = 1007 + TextArrayOid = 1009 + ByteaArrayOid = 1001 + VarcharArrayOid = 1015 + Int8ArrayOid = 1016 + Float4ArrayOid = 1021 + Float8ArrayOid = 1022 + AclItemOid = 1033 + AclItemArrayOid = 1034 + InetArrayOid = 1041 + VarcharOid = 1043 + DateOid = 1082 + TimestampOid = 1114 + TimestampArrayOid = 1115 + TimestampTzOid = 1184 + TimestampTzArrayOid = 1185 + RecordOid = 2249 + UuidOid = 2950 + JsonbOid = 3802 +) + +// PostgreSQL format codes +const ( + TextFormatCode = 0 + BinaryFormatCode = 1 +) + +const maxUint = ^uint(0) +const maxInt = int(maxUint >> 1) +const minInt = -maxInt - 1 + +// DefaultTypeFormats maps type names to their default requested format (text +// or binary). In theory the Scanner interface should be the one to determine +// the format of the returned values. However, the query has already been +// executed by the time Scan is called so it has no chance to set the format. +// So for types that should always be returned in binary the format should be +// set here. +var DefaultTypeFormats map[string]int16 + +func init() { + DefaultTypeFormats = map[string]int16{ + "_aclitem": TextFormatCode, // Pg's src/backend/utils/adt/acl.c has only in/out (text) not send/recv (bin) + "_bool": BinaryFormatCode, + "_bytea": BinaryFormatCode, + "_cidr": BinaryFormatCode, + "_float4": BinaryFormatCode, + "_float8": BinaryFormatCode, + "_inet": BinaryFormatCode, + "_int2": BinaryFormatCode, + "_int4": BinaryFormatCode, + "_int8": BinaryFormatCode, + "_text": BinaryFormatCode, + "_timestamp": BinaryFormatCode, + "_timestamptz": BinaryFormatCode, + "_varchar": BinaryFormatCode, + "aclitem": TextFormatCode, // Pg's src/backend/utils/adt/acl.c has only in/out (text) not send/recv (bin) + "bool": BinaryFormatCode, + "bytea": BinaryFormatCode, + "char": BinaryFormatCode, + "cid": BinaryFormatCode, + "cidr": BinaryFormatCode, + "date": BinaryFormatCode, + "float4": BinaryFormatCode, + "float8": BinaryFormatCode, + "json": BinaryFormatCode, + "jsonb": BinaryFormatCode, + "inet": BinaryFormatCode, + "int2": BinaryFormatCode, + "int4": BinaryFormatCode, + "int8": BinaryFormatCode, + "name": BinaryFormatCode, + "oid": BinaryFormatCode, + "record": BinaryFormatCode, + "text": BinaryFormatCode, + "tid": BinaryFormatCode, + "timestamp": BinaryFormatCode, + "timestamptz": BinaryFormatCode, + "varchar": BinaryFormatCode, + "xid": BinaryFormatCode, + } +} + +// SerializationError occurs on failure to encode or decode a value +type SerializationError string + +func (e SerializationError) Error() string { + return string(e) +} + +// Deprecated: Scanner is an interface used to decode values from the PostgreSQL +// server. To allow types to support pgx and database/sql.Scan this interface +// has been deprecated in favor of PgxScanner. +type Scanner interface { + // Scan MUST check r.Type().DataType (to check by OID) or + // r.Type().DataTypeName (to check by name) to ensure that it is scanning an + // expected column type. 
It also MUST check r.Type().FormatCode before + // decoding. It should not assume that it was called on a data type or format + // that it understands. + Scan(r *ValueReader) error +} + +// PgxScanner is an interface used to decode values from the PostgreSQL server. +// It is used exactly the same as the Scanner interface. It simply renames +// the method. +type PgxScanner interface { + // ScanPgx MUST check r.Type().DataType (to check by OID) or + // r.Type().DataTypeName (to check by name) to ensure that it is scanning an + // expected column type. It also MUST check r.Type().FormatCode before + // decoding. It should not assume that it was called on a data type or format + // that it understands. + ScanPgx(r *ValueReader) error +} + +// Encoder is an interface used to encode values for transmission to the +// PostgreSQL server. +type Encoder interface { + // Encode writes the value to w. + // + // If the value is NULL an int32(-1) should be written. + // + // Encode MUST check oid to see if the parameter data type is compatible. If + // this is not done, the PostgreSQL server may detect the error if the + // expected data size or format of the encoded data does not match. But if + // the encoded data is a valid representation of the data type PostgreSQL + // expects, such as date and int4, incorrect data may be stored. + Encode(w *WriteBuf, oid Oid) error + + // FormatCode returns the format that the encoder writes the value. It must be + // either pgx.TextFormatCode or pgx.BinaryFormatCode. + FormatCode() int16 +} + +// NullFloat32 represents a float4 that may be null. NullFloat32 implements the +// Scanner and Encoder interfaces so it may be used both as an argument to +// Query[Row] and a destination for Scan. +// +// If Valid is false then the value is NULL. +type NullFloat32 struct { + Float32 float32 + Valid bool // Valid is true if Float32 is not NULL +} + +func (n *NullFloat32) Scan(vr *ValueReader) error { + if vr.Type().DataType != Float4Oid { + return SerializationError(fmt.Sprintf("NullFloat32.Scan cannot decode OID %d", vr.Type().DataType)) + } + + if vr.Len() == -1 { + n.Float32, n.Valid = 0, false + return nil + } + n.Valid = true + n.Float32 = decodeFloat4(vr) + return vr.Err() +} + +func (n NullFloat32) FormatCode() int16 { return BinaryFormatCode } + +func (n NullFloat32) Encode(w *WriteBuf, oid Oid) error { + if oid != Float4Oid { + return SerializationError(fmt.Sprintf("NullFloat32.Encode cannot encode into OID %d", oid)) + } + + if !n.Valid { + w.WriteInt32(-1) + return nil + } + + return encodeFloat32(w, oid, n.Float32) +} + +// NullFloat64 represents a float8 that may be null. NullFloat64 implements the +// Scanner and Encoder interfaces so it may be used both as an argument to +// Query[Row] and a destination for Scan. +// +// If Valid is false then the value is NULL.
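+// +// As with the other Null* types, it can appear on either side of a query; +// a sketch (conn and the table are assumed): +// +//   price := NullFloat64{Float64: 1.99, Valid: true} +//   _, err := conn.Exec("update items set price = $1 where id = $2", price, 7) +//   // NullFloat64{} (Valid: false) would store SQL NULL instead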
+type NullFloat64 struct { + Float64 float64 + Valid bool // Valid is true if Float64 is not NULL +} + +func (n *NullFloat64) Scan(vr *ValueReader) error { + if vr.Type().DataType != Float8Oid { + return SerializationError(fmt.Sprintf("NullFloat64.Scan cannot decode OID %d", vr.Type().DataType)) + } + + if vr.Len() == -1 { + n.Float64, n.Valid = 0, false + return nil + } + n.Valid = true + n.Float64 = decodeFloat8(vr) + return vr.Err() +} + +func (n NullFloat64) FormatCode() int16 { return BinaryFormatCode } + +func (n NullFloat64) Encode(w *WriteBuf, oid Oid) error { + if oid != Float8Oid { + return SerializationError(fmt.Sprintf("NullFloat64.Encode cannot encode into OID %d", oid)) + } + + if !n.Valid { + w.WriteInt32(-1) + return nil + } + + return encodeFloat64(w, oid, n.Float64) +} + +// NullString represents a string that may be null. NullString implements the +// Scanner and Encoder interfaces so it may be used both as an argument to +// Query[Row] and a destination for Scan. +// +// If Valid is false then the value is NULL. +type NullString struct { + String string + Valid bool // Valid is true if String is not NULL +} + +func (n *NullString) Scan(vr *ValueReader) error { + // Not checking oid so we can scan anything into a NullString - may revisit this decision later + + if vr.Len() == -1 { + n.String, n.Valid = "", false + return nil + } + + n.Valid = true + n.String = decodeText(vr) + return vr.Err() +} + +func (n NullString) FormatCode() int16 { return TextFormatCode } + +func (n NullString) Encode(w *WriteBuf, oid Oid) error { + if !n.Valid { + w.WriteInt32(-1) + return nil + } + + return encodeString(w, oid, n.String) +} + +// AclItem is used for PostgreSQL's aclitem data type. A sample aclitem +// might look like this: +// +// postgres=arwdDxt/postgres +// +// Note, however, that because the user/role name part of an aclitem is +// an identifier, it follows all the usual formatting rules for SQL +// identifiers: if it contains spaces and other special characters, +// it should appear in double-quotes: +// +// postgres=arwdDxt/"role with spaces" +// +type AclItem string + +// NullAclItem represents a pgx.AclItem that may be null. NullAclItem implements the +// Scanner and Encoder interfaces so it may be used both as an argument to +// Query[Row] and a destination for Scan for prepared and unprepared queries. +// +// If Valid is false then the value is NULL. +type NullAclItem struct { + AclItem AclItem + Valid bool // Valid is true if AclItem is not NULL +} + +func (n *NullAclItem) Scan(vr *ValueReader) error { + if vr.Type().DataType != AclItemOid { + return SerializationError(fmt.Sprintf("NullAclItem.Scan cannot decode OID %d", vr.Type().DataType)) + } + + if vr.Len() == -1 { + n.AclItem, n.Valid = "", false + return nil + } + + n.Valid = true + n.AclItem = AclItem(decodeText(vr)) + return vr.Err() +} + +// Particularly important to return TextFormatCode, seeing as Postgres +// only ever sends aclitem as text, not binary. +func (n NullAclItem) FormatCode() int16 { return TextFormatCode } + +func (n NullAclItem) Encode(w *WriteBuf, oid Oid) error { + if !n.Valid { + w.WriteInt32(-1) + return nil + } + + return encodeString(w, oid, string(n.AclItem)) +} + +// Name is a type used for PostgreSQL's special 63-byte +// name data type, used for identifiers like table names. +// The pg_class.relname column is a good example of where the +// name data type is used.
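+// +// For example (conn is an assumed *Conn): +// +//   var relname Name +//   err := conn.QueryRow("select relname from pg_class limit 1").Scan(&relname)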
+// +// Note that the underlying Go data type of pgx.Name is string, +// so there is no way to enforce the 63-byte length. Inputting +// a longer name into PostgreSQL will result in silent truncation +// to 63 bytes. +// +// Also, if you have custom-compiled PostgreSQL and set +// NAMEDATALEN to a different value, obviously that number of +// bytes applies, rather than the default 63. +type Name string + +// NullName represents a pgx.Name that may be null. NullName implements the +// Scanner and Encoder interfaces so it may be used both as an argument to +// Query[Row] and a destination for Scan for prepared and unprepared queries. +// +// If Valid is false then the value is NULL. +type NullName struct { + Name Name + Valid bool // Valid is true if Name is not NULL +} + +func (n *NullName) Scan(vr *ValueReader) error { + if vr.Type().DataType != NameOid { + return SerializationError(fmt.Sprintf("NullName.Scan cannot decode OID %d", vr.Type().DataType)) + } + + if vr.Len() == -1 { + n.Name, n.Valid = "", false + return nil + } + + n.Valid = true + n.Name = Name(decodeText(vr)) + return vr.Err() +} + +func (n NullName) FormatCode() int16 { return TextFormatCode } + +func (n NullName) Encode(w *WriteBuf, oid Oid) error { + if !n.Valid { + w.WriteInt32(-1) + return nil + } + + return encodeString(w, oid, string(n.Name)) +} + +// The pgx.Char type is for PostgreSQL's special 8-bit-only +// "char" type more akin to the C language's char type, or Go's byte type. +// (Note that the name in PostgreSQL itself is "char", in double-quotes, +// and not char.) It gets used a lot in PostgreSQL's system tables to hold +// a single ASCII character value (eg pg_class.relkind). +type Char byte + +// NullChar represents a pgx.Char that may be null. NullChar implements the +// Scanner and Encoder interfaces so it may be used both as an argument to +// Query[Row] and a destination for Scan for prepared and unprepared queries. +// +// If Valid is false then the value is NULL. +type NullChar struct { + Char Char + Valid bool // Valid is true if Char is not NULL +} + +func (n *NullChar) Scan(vr *ValueReader) error { + if vr.Type().DataType != CharOid { + return SerializationError(fmt.Sprintf("NullChar.Scan cannot decode OID %d", vr.Type().DataType)) + } + + if vr.Len() == -1 { + n.Char, n.Valid = 0, false + return nil + } + n.Valid = true + n.Char = decodeChar(vr) + return vr.Err() +} + +func (n NullChar) FormatCode() int16 { return BinaryFormatCode } + +func (n NullChar) Encode(w *WriteBuf, oid Oid) error { + if oid != CharOid { + return SerializationError(fmt.Sprintf("NullChar.Encode cannot encode into OID %d", oid)) + } + + if !n.Valid { + w.WriteInt32(-1) + return nil + } + + return encodeChar(w, oid, n.Char) +} + +// NullInt16 represents a smallint that may be null. NullInt16 implements the +// Scanner and Encoder interfaces so it may be used both as an argument to +// Query[Row] and a destination for Scan for prepared and unprepared queries. +// +// If Valid is false then the value is NULL. 
+type NullInt16 struct { + Int16 int16 + Valid bool // Valid is true if Int16 is not NULL +} + +func (n *NullInt16) Scan(vr *ValueReader) error { + if vr.Type().DataType != Int2Oid { + return SerializationError(fmt.Sprintf("NullInt16.Scan cannot decode OID %d", vr.Type().DataType)) + } + + if vr.Len() == -1 { + n.Int16, n.Valid = 0, false + return nil + } + n.Valid = true + n.Int16 = decodeInt2(vr) + return vr.Err() +} + +func (n NullInt16) FormatCode() int16 { return BinaryFormatCode } + +func (n NullInt16) Encode(w *WriteBuf, oid Oid) error { + if oid != Int2Oid { + return SerializationError(fmt.Sprintf("NullInt16.Encode cannot encode into OID %d", oid)) + } + + if !n.Valid { + w.WriteInt32(-1) + return nil + } + + return encodeInt16(w, oid, n.Int16) +} + +// NullInt32 represents an integer that may be null. NullInt32 implements the +// Scanner and Encoder interfaces so it may be used both as an argument to +// Query[Row] and a destination for Scan. +// +// If Valid is false then the value is NULL. +type NullInt32 struct { + Int32 int32 + Valid bool // Valid is true if Int32 is not NULL +} + +func (n *NullInt32) Scan(vr *ValueReader) error { + if vr.Type().DataType != Int4Oid { + return SerializationError(fmt.Sprintf("NullInt32.Scan cannot decode OID %d", vr.Type().DataType)) + } + + if vr.Len() == -1 { + n.Int32, n.Valid = 0, false + return nil + } + n.Valid = true + n.Int32 = decodeInt4(vr) + return vr.Err() +} + +func (n NullInt32) FormatCode() int16 { return BinaryFormatCode } + +func (n NullInt32) Encode(w *WriteBuf, oid Oid) error { + if oid != Int4Oid { + return SerializationError(fmt.Sprintf("NullInt32.Encode cannot encode into OID %d", oid)) + } + + if !n.Valid { + w.WriteInt32(-1) + return nil + } + + return encodeInt32(w, oid, n.Int32) +} + +// Oid (Object Identifier Type) is, according to https://www.postgresql.org/docs/current/static/datatype-oid.html, +// used internally by PostgreSQL as a primary key for various system tables. It is currently implemented +// as an unsigned four-byte integer. Its definition can be found in src/include/postgres_ext.h +// in the PostgreSQL sources. +type Oid uint32 + +// NullOid represents an Object Identifier (Oid) that may be null. NullOid implements the +// Scanner and Encoder interfaces so it may be used both as an argument to +// Query[Row] and a destination for Scan. +// +// If Valid is false then the value is NULL. +type NullOid struct { + Oid Oid + Valid bool // Valid is true if Oid is not NULL +} + +func (n *NullOid) Scan(vr *ValueReader) error { + if vr.Type().DataType != OidOid { + return SerializationError(fmt.Sprintf("NullOid.Scan cannot decode OID %d", vr.Type().DataType)) + } + + if vr.Len() == -1 { + n.Oid, n.Valid = 0, false + return nil + } + n.Valid = true + n.Oid = decodeOid(vr) + return vr.Err() +} + +func (n NullOid) FormatCode() int16 { return BinaryFormatCode } + +func (n NullOid) Encode(w *WriteBuf, oid Oid) error { + if oid != OidOid { + return SerializationError(fmt.Sprintf("NullOid.Encode cannot encode into OID %d", oid)) + } + + if !n.Valid { + w.WriteInt32(-1) + return nil + } + + return encodeOid(w, oid, n.Oid) +} + +// Xid is PostgreSQL's Transaction ID type. +// +// In later versions of PostgreSQL, it is the type used for the backend_xid +// and backend_xmin columns of the pg_stat_activity system view. +// +// Also, when one does +// +// select xmin, xmax, * from some_table; +// +// it is the data type of the xmin and xmax hidden system columns.
+// +// It is currently implemented as an unsigned four byte integer. +// Its definition can be found in src/include/postgres_ext.h as TransactionId +// in the PostgreSQL sources. +type Xid uint32 + +// NullXid represents a Transaction ID (Xid) that may be null. NullXid implements the +// Scanner and Encoder interfaces so it may be used both as an argument to +// Query[Row] and a destination for Scan. +// +// If Valid is false then the value is NULL. +type NullXid struct { + Xid Xid + Valid bool // Valid is true if Xid is not NULL +} + +func (n *NullXid) Scan(vr *ValueReader) error { + if vr.Type().DataType != XidOid { + return SerializationError(fmt.Sprintf("NullXid.Scan cannot decode OID %d", vr.Type().DataType)) + } + + if vr.Len() == -1 { + n.Xid, n.Valid = 0, false + return nil + } + n.Valid = true + n.Xid = decodeXid(vr) + return vr.Err() +} + +func (n NullXid) FormatCode() int16 { return BinaryFormatCode } + +func (n NullXid) Encode(w *WriteBuf, oid Oid) error { + if oid != XidOid { + return SerializationError(fmt.Sprintf("NullXid.Encode cannot encode into OID %d", oid)) + } + + if !n.Valid { + w.WriteInt32(-1) + return nil + } + + return encodeXid(w, oid, n.Xid) +} + +// Cid is PostgreSQL's Command Identifier type. +// +// When one does +// +// select cmin, cmax, * from some_table; +// +// it is the data type of the cmin and cmax hidden system columns. +// +// It is currently implemented as an unsigned four byte integer. +// Its definition can be found in src/include/c.h as CommandId +// in the PostgreSQL sources. +type Cid uint32 + +// NullCid represents a Command Identifier (Cid) that may be null. NullCid implements the +// Scanner and Encoder interfaces so it may be used both as an argument to +// Query[Row] and a destination for Scan. +// +// If Valid is false then the value is NULL. +type NullCid struct { + Cid Cid + Valid bool // Valid is true if Cid is not NULL +} + +func (n *NullCid) Scan(vr *ValueReader) error { + if vr.Type().DataType != CidOid { + return SerializationError(fmt.Sprintf("NullCid.Scan cannot decode OID %d", vr.Type().DataType)) + } + + if vr.Len() == -1 { + n.Cid, n.Valid = 0, false + return nil + } + n.Valid = true + n.Cid = decodeCid(vr) + return vr.Err() +} + +func (n NullCid) FormatCode() int16 { return BinaryFormatCode } + +func (n NullCid) Encode(w *WriteBuf, oid Oid) error { + if oid != CidOid { + return SerializationError(fmt.Sprintf("NullCid.Encode cannot encode into OID %d", oid)) + } + + if !n.Valid { + w.WriteInt32(-1) + return nil + } + + return encodeCid(w, oid, n.Cid) +} + +// Tid is PostgreSQL's Tuple Identifier type. +// +// When one does +// +// select ctid, * from some_table; +// +// it is the data type of the ctid hidden system column. +// +// It is currently implemented as a pair of unsigned integers: a four +// byte block number and a two byte offset number. +// Its conversion functions can be found in src/backend/utils/adt/tid.c +// in the PostgreSQL sources. +type Tid struct { + BlockNumber uint32 + OffsetNumber uint16 +} + +// NullTid represents a Tuple Identifier (Tid) that may be null. NullTid implements the +// Scanner and Encoder interfaces so it may be used both as an argument to +// Query[Row] and a destination for Scan. +// +// If Valid is false then the value is NULL.
+type NullTid struct { + Tid Tid + Valid bool // Valid is true if Tid is not NULL +} + +func (n *NullTid) Scan(vr *ValueReader) error { + if vr.Type().DataType != TidOid { + return SerializationError(fmt.Sprintf("NullTid.Scan cannot decode OID %d", vr.Type().DataType)) + } + + if vr.Len() == -1 { + n.Tid, n.Valid = Tid{BlockNumber: 0, OffsetNumber: 0}, false + return nil + } + n.Valid = true + n.Tid = decodeTid(vr) + return vr.Err() +} + +func (n NullTid) FormatCode() int16 { return BinaryFormatCode } + +func (n NullTid) Encode(w *WriteBuf, oid Oid) error { + if oid != TidOid { + return SerializationError(fmt.Sprintf("NullTid.Encode cannot encode into OID %d", oid)) + } + + if !n.Valid { + w.WriteInt32(-1) + return nil + } + + return encodeTid(w, oid, n.Tid) +} + +// NullInt64 represents a bigint that may be null. NullInt64 implements the +// Scanner and Encoder interfaces so it may be used both as an argument to +// Query[Row] and a destination for Scan. +// +// If Valid is false then the value is NULL. +type NullInt64 struct { + Int64 int64 + Valid bool // Valid is true if Int64 is not NULL +} + +func (n *NullInt64) Scan(vr *ValueReader) error { + if vr.Type().DataType != Int8Oid { + return SerializationError(fmt.Sprintf("NullInt64.Scan cannot decode OID %d", vr.Type().DataType)) + } + + if vr.Len() == -1 { + n.Int64, n.Valid = 0, false + return nil + } + n.Valid = true + n.Int64 = decodeInt8(vr) + return vr.Err() +} + +func (n NullInt64) FormatCode() int16 { return BinaryFormatCode } + +func (n NullInt64) Encode(w *WriteBuf, oid Oid) error { + if oid != Int8Oid { + return SerializationError(fmt.Sprintf("NullInt64.Encode cannot encode into OID %d", oid)) + } + + if !n.Valid { + w.WriteInt32(-1) + return nil + } + + return encodeInt64(w, oid, n.Int64) +} + +// NullBool represents a bool that may be null. NullBool implements the Scanner +// and Encoder interfaces so it may be used both as an argument to Query[Row] +// and a destination for Scan. +// +// If Valid is false then the value is NULL. +type NullBool struct { + Bool bool + Valid bool // Valid is true if Bool is not NULL +} + +func (n *NullBool) Scan(vr *ValueReader) error { + if vr.Type().DataType != BoolOid { + return SerializationError(fmt.Sprintf("NullBool.Scan cannot decode OID %d", vr.Type().DataType)) + } + + if vr.Len() == -1 { + n.Bool, n.Valid = false, false + return nil + } + n.Valid = true + n.Bool = decodeBool(vr) + return vr.Err() +} + +func (n NullBool) FormatCode() int16 { return BinaryFormatCode } + +func (n NullBool) Encode(w *WriteBuf, oid Oid) error { + if oid != BoolOid { + return SerializationError(fmt.Sprintf("NullBool.Encode cannot encode into OID %d", oid)) + } + + if !n.Valid { + w.WriteInt32(-1) + return nil + } + + return encodeBool(w, oid, n.Bool) +} + +// NullTime represents a time.Time that may be null. NullTime implements the +// Scanner and Encoder interfaces so it may be used both as an argument to +// Query[Row] and a destination for Scan. It corresponds with the PostgreSQL +// types timestamptz, timestamp, and date. +// +// If Valid is false then the value is NULL.
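+// +// A sketch of scanning a nullable timestamp (conn and the column are +// assumed): +// +//   var deletedAt NullTime +//   err := conn.QueryRow("select deleted_at from users where id = $1", 1).Scan(&deletedAt) +//   if err == nil && deletedAt.Valid { +//       fmt.Println(deletedAt.Time) +//   }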
+type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +func (n *NullTime) Scan(vr *ValueReader) error { + oid := vr.Type().DataType + if oid != TimestampTzOid && oid != TimestampOid && oid != DateOid { + return SerializationError(fmt.Sprintf("NullTime.Scan cannot decode OID %d", vr.Type().DataType)) + } + + if vr.Len() == -1 { + n.Time, n.Valid = time.Time{}, false + return nil + } + + n.Valid = true + switch oid { + case TimestampTzOid: + n.Time = decodeTimestampTz(vr) + case TimestampOid: + n.Time = decodeTimestamp(vr) + case DateOid: + n.Time = decodeDate(vr) + } + + return vr.Err() +} + +func (n NullTime) FormatCode() int16 { return BinaryFormatCode } + +func (n NullTime) Encode(w *WriteBuf, oid Oid) error { + if oid != TimestampTzOid && oid != TimestampOid && oid != DateOid { + return SerializationError(fmt.Sprintf("NullTime.Encode cannot encode into OID %d", oid)) + } + + if !n.Valid { + w.WriteInt32(-1) + return nil + } + + return encodeTime(w, oid, n.Time) +} + +// Hstore represents an hstore column. It does not support a null column or null +// key values (use NullHstore for this). Hstore implements the Scanner and +// Encoder interfaces so it may be used both as an argument to Query[Row] and a +// destination for Scan. +type Hstore map[string]string + +func (h *Hstore) Scan(vr *ValueReader) error { + //oid for hstore not standardized, so we check its type name + if vr.Type().DataTypeName != "hstore" { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode type %s into Hstore", vr.Type().DataTypeName))) + return nil + } + + if vr.Len() == -1 { + vr.Fatal(ProtocolError("Cannot decode null column into Hstore")) + return nil + } + + switch vr.Type().FormatCode { + case TextFormatCode: + m, err := parseHstoreToMap(vr.ReadString(vr.Len())) + if err != nil { + vr.Fatal(ProtocolError(fmt.Sprintf("Can't decode hstore column: %v", err))) + return nil + } + hm := Hstore(m) + *h = hm + return nil + case BinaryFormatCode: + vr.Fatal(ProtocolError("Can't decode binary hstore")) + return nil + default: + vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode))) + return nil + } +} + +func (h Hstore) FormatCode() int16 { return TextFormatCode } + +func (h Hstore) Encode(w *WriteBuf, oid Oid) error { + var buf bytes.Buffer + + i := 0 + for k, v := range h { + i++ + ks := strings.Replace(k, `\`, `\\`, -1) + ks = strings.Replace(ks, `"`, `\"`, -1) + vs := strings.Replace(v, `\`, `\\`, -1) + vs = strings.Replace(vs, `"`, `\"`, -1) + buf.WriteString(`"`) + buf.WriteString(ks) + buf.WriteString(`"=>"`) + buf.WriteString(vs) + buf.WriteString(`"`) + if i < len(h) { + buf.WriteString(", ") + } + } + w.WriteInt32(int32(buf.Len())) + w.WriteBytes(buf.Bytes()) + return nil +} + +// NullHstore represents an hstore column that can be null or have null values +// associated with its keys. NullHstore implements the Scanner and Encoder +// interfaces so it may be used both as an argument to Query[Row] and a +// destination for Scan. +// +// If Valid is false, then the value of the entire hstore column is NULL +// If any of the NullString values in Store has Valid set to false, the key +// appears in the hstore column, but its value is explicitly set to NULL. 
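+// +// For example, the hstore text value: +// +//   "a"=>"1", "b"=>NULL +// +// can only be round-tripped through NullHstore, since a plain Hstore +// has no way to represent the NULL under key "b".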
+type NullHstore struct { + Hstore map[string]NullString + Valid bool +} + +func (h *NullHstore) Scan(vr *ValueReader) error { + //oid for hstore not standardized, so we check its type name + if vr.Type().DataTypeName != "hstore" { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode type %s into NullHstore", vr.Type().DataTypeName))) + return nil + } + + if vr.Len() == -1 { + h.Valid = false + return nil + } + + switch vr.Type().FormatCode { + case TextFormatCode: + store, err := parseHstoreToNullHstore(vr.ReadString(vr.Len())) + if err != nil { + vr.Fatal(ProtocolError(fmt.Sprintf("Can't decode hstore column: %v", err))) + return nil + } + h.Valid = true + h.Hstore = store + return nil + case BinaryFormatCode: + vr.Fatal(ProtocolError("Can't decode binary hstore")) + return nil + default: + vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode))) + return nil + } +} + +func (h NullHstore) FormatCode() int16 { return TextFormatCode } + +func (h NullHstore) Encode(w *WriteBuf, oid Oid) error { + var buf bytes.Buffer + + if !h.Valid { + w.WriteInt32(-1) + return nil + } + + i := 0 + for k, v := range h.Hstore { + i++ + ks := strings.Replace(k, `\`, `\\`, -1) + ks = strings.Replace(ks, `"`, `\"`, -1) + if v.Valid { + vs := strings.Replace(v.String, `\`, `\\`, -1) + vs = strings.Replace(vs, `"`, `\"`, -1) + buf.WriteString(fmt.Sprintf(`"%s"=>"%s"`, ks, vs)) + } else { + buf.WriteString(fmt.Sprintf(`"%s"=>NULL`, ks)) + } + if i < len(h.Hstore) { + buf.WriteString(", ") + } + } + w.WriteInt32(int32(buf.Len())) + w.WriteBytes(buf.Bytes()) + return nil +} + +// Encode encodes arg into wbuf as the type oid. This allows implementations +// of the Encoder interface to delegate the actual work of encoding to the +// built-in functionality. 
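+// +// For instance, a custom type might delegate like this (the type is +// illustrative, not part of the package): +// +//   type Celsius float64 +// +//   func (c Celsius) FormatCode() int16 { return BinaryFormatCode } +// +//   func (c Celsius) Encode(w *WriteBuf, oid Oid) error { +//       return Encode(w, oid, float64(c)) +//   }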
+func Encode(wbuf *WriteBuf, oid Oid, arg interface{}) error { + if arg == nil { + wbuf.WriteInt32(-1) + return nil + } + + switch arg := arg.(type) { + case Encoder: + return arg.Encode(wbuf, oid) + case driver.Valuer: + v, err := arg.Value() + if err != nil { + return err + } + return Encode(wbuf, oid, v) + case string: + return encodeString(wbuf, oid, arg) + case []AclItem: + return encodeAclItemSlice(wbuf, oid, arg) + case []byte: + return encodeByteSlice(wbuf, oid, arg) + case [][]byte: + return encodeByteSliceSlice(wbuf, oid, arg) + } + + refVal := reflect.ValueOf(arg) + + if refVal.Kind() == reflect.Ptr { + if refVal.IsNil() { + wbuf.WriteInt32(-1) + return nil + } + arg = refVal.Elem().Interface() + return Encode(wbuf, oid, arg) + } + + if oid == JsonOid { + return encodeJSON(wbuf, oid, arg) + } + if oid == JsonbOid { + return encodeJSONB(wbuf, oid, arg) + } + + switch arg := arg.(type) { + case []string: + return encodeStringSlice(wbuf, oid, arg) + case bool: + return encodeBool(wbuf, oid, arg) + case []bool: + return encodeBoolSlice(wbuf, oid, arg) + case int: + return encodeInt(wbuf, oid, arg) + case uint: + return encodeUInt(wbuf, oid, arg) + case Char: + return encodeChar(wbuf, oid, arg) + case AclItem: + // The aclitem data type goes over the wire using the same format as string, + // so just cast to string and use encodeString + return encodeString(wbuf, oid, string(arg)) + case Name: + // The name data type goes over the wire using the same format as string, + // so just cast to string and use encodeString + return encodeString(wbuf, oid, string(arg)) + case int8: + return encodeInt8(wbuf, oid, arg) + case uint8: + return encodeUInt8(wbuf, oid, arg) + case int16: + return encodeInt16(wbuf, oid, arg) + case []int16: + return encodeInt16Slice(wbuf, oid, arg) + case uint16: + return encodeUInt16(wbuf, oid, arg) + case []uint16: + return encodeUInt16Slice(wbuf, oid, arg) + case int32: + return encodeInt32(wbuf, oid, arg) + case []int32: + return encodeInt32Slice(wbuf, oid, arg) + case uint32: + return encodeUInt32(wbuf, oid, arg) + case []uint32: + return encodeUInt32Slice(wbuf, oid, arg) + case int64: + return encodeInt64(wbuf, oid, arg) + case []int64: + return encodeInt64Slice(wbuf, oid, arg) + case uint64: + return encodeUInt64(wbuf, oid, arg) + case []uint64: + return encodeUInt64Slice(wbuf, oid, arg) + case float32: + return encodeFloat32(wbuf, oid, arg) + case []float32: + return encodeFloat32Slice(wbuf, oid, arg) + case float64: + return encodeFloat64(wbuf, oid, arg) + case []float64: + return encodeFloat64Slice(wbuf, oid, arg) + case time.Time: + return encodeTime(wbuf, oid, arg) + case []time.Time: + return encodeTimeSlice(wbuf, oid, arg) + case net.IP: + return encodeIP(wbuf, oid, arg) + case []net.IP: + return encodeIPSlice(wbuf, oid, arg) + case net.IPNet: + return encodeIPNet(wbuf, oid, arg) + case []net.IPNet: + return encodeIPNetSlice(wbuf, oid, arg) + case Oid: + return encodeOid(wbuf, oid, arg) + case Xid: + return encodeXid(wbuf, oid, arg) + case Cid: + return encodeCid(wbuf, oid, arg) + default: + if strippedArg, ok := stripNamedType(&refVal); ok { + return Encode(wbuf, oid, strippedArg) + } + return SerializationError(fmt.Sprintf("Cannot encode %T into oid %v - %T must implement Encoder or be converted to a string", arg, oid, arg)) + } +} + +func stripNamedType(val *reflect.Value) (interface{}, bool) { + switch val.Kind() { + case reflect.Int: + return int(val.Int()), true + case reflect.Int8: + return int8(val.Int()), true + case reflect.Int16: + return 
int16(val.Int()), true + case reflect.Int32: + return int32(val.Int()), true + case reflect.Int64: + return int64(val.Int()), true + case reflect.Uint: + return uint(val.Uint()), true + case reflect.Uint8: + return uint8(val.Uint()), true + case reflect.Uint16: + return uint16(val.Uint()), true + case reflect.Uint32: + return uint32(val.Uint()), true + case reflect.Uint64: + return uint64(val.Uint()), true + case reflect.String: + return val.String(), true + } + + return nil, false +} + +// Decode decodes from vr into d. d must be a pointer. This allows +// implementations of the Decoder interface to delegate the actual work of +// decoding to the built-in functionality. +func Decode(vr *ValueReader, d interface{}) error { + switch v := d.(type) { + case *bool: + *v = decodeBool(vr) + case *int: + n := decodeInt(vr) + if n < int64(minInt) { + return fmt.Errorf("%d is less than minimum value for int", n) + } else if n > int64(maxInt) { + return fmt.Errorf("%d is greater than maximum value for int", n) + } + *v = int(n) + case *int8: + n := decodeInt(vr) + if n < math.MinInt8 { + return fmt.Errorf("%d is less than minimum value for int8", n) + } else if n > math.MaxInt8 { + return fmt.Errorf("%d is greater than maximum value for int8", n) + } + *v = int8(n) + case *int16: + n := decodeInt(vr) + if n < math.MinInt16 { + return fmt.Errorf("%d is less than minimum value for int16", n) + } else if n > math.MaxInt16 { + return fmt.Errorf("%d is greater than maximum value for int16", n) + } + *v = int16(n) + case *int32: + n := decodeInt(vr) + if n < math.MinInt32 { + return fmt.Errorf("%d is less than minimum value for int32", n) + } else if n > math.MaxInt32 { + return fmt.Errorf("%d is greater than maximum value for int32", n) + } + *v = int32(n) + case *int64: + n := decodeInt(vr) + if n < math.MinInt64 { + return fmt.Errorf("%d is less than minimum value for int64", n) + } else if n > math.MaxInt64 { + return fmt.Errorf("%d is greater than maximum value for int64", n) + } + *v = int64(n) + case *uint: + n := decodeInt(vr) + if n < 0 { + return fmt.Errorf("%d is less than zero for uint", n) + } else if maxInt == math.MaxInt32 && n > math.MaxUint32 { + return fmt.Errorf("%d is greater than maximum value for uint", n) + } + *v = uint(n) + case *uint8: + n := decodeInt(vr) + if n < 0 { + return fmt.Errorf("%d is less than zero for uint8", n) + } else if n > math.MaxUint8 { + return fmt.Errorf("%d is greater than maximum value for uint8", n) + } + *v = uint8(n) + case *uint16: + n := decodeInt(vr) + if n < 0 { + return fmt.Errorf("%d is less than zero for uint16", n) + } else if n > math.MaxUint16 { + return fmt.Errorf("%d is greater than maximum value for uint16", n) + } + *v = uint16(n) + case *uint32: + n := decodeInt(vr) + if n < 0 { + return fmt.Errorf("%d is less than zero for uint32", n) + } else if n > math.MaxUint32 { + return fmt.Errorf("%d is greater than maximum value for uint32", n) + } + *v = uint32(n) + case *uint64: + n := decodeInt(vr) + if n < 0 { + return fmt.Errorf("%d is less than zero for uint64", n) + } + *v = uint64(n) + case *Char: + *v = decodeChar(vr) + case *AclItem: + // aclitem goes over the wire just like text + *v = AclItem(decodeText(vr)) + case *Name: + // name goes over the wire just like text + *v = Name(decodeText(vr)) + case *Oid: + *v = decodeOid(vr) + case *Xid: + *v = decodeXid(vr) + case *Tid: + *v = decodeTid(vr) + case *Cid: + *v = decodeCid(vr) + case *string: + *v = decodeText(vr) + case *float32: + *v = decodeFloat4(vr) + case *float64: + *v = 
decodeFloat8(vr) + case *[]AclItem: + *v = decodeAclItemArray(vr) + case *[]bool: + *v = decodeBoolArray(vr) + case *[]int16: + *v = decodeInt2Array(vr) + case *[]uint16: + *v = decodeInt2ArrayToUInt(vr) + case *[]int32: + *v = decodeInt4Array(vr) + case *[]uint32: + *v = decodeInt4ArrayToUInt(vr) + case *[]int64: + *v = decodeInt8Array(vr) + case *[]uint64: + *v = decodeInt8ArrayToUInt(vr) + case *[]float32: + *v = decodeFloat4Array(vr) + case *[]float64: + *v = decodeFloat8Array(vr) + case *[]string: + *v = decodeTextArray(vr) + case *[]time.Time: + *v = decodeTimestampArray(vr) + case *[][]byte: + *v = decodeByteaArray(vr) + case *[]interface{}: + *v = decodeRecord(vr) + case *time.Time: + switch vr.Type().DataType { + case DateOid: + *v = decodeDate(vr) + case TimestampTzOid: + *v = decodeTimestampTz(vr) + case TimestampOid: + *v = decodeTimestamp(vr) + default: + return fmt.Errorf("Can't convert OID %v to time.Time", vr.Type().DataType) + } + case *net.IP: + ipnet := decodeInet(vr) + if oneCount, bitCount := ipnet.Mask.Size(); oneCount != bitCount { + return fmt.Errorf("Cannot decode netmask into *net.IP") + } + *v = ipnet.IP + case *[]net.IP: + ipnets := decodeInetArray(vr) + ips := make([]net.IP, len(ipnets)) + for i, ipnet := range ipnets { + if oneCount, bitCount := ipnet.Mask.Size(); oneCount != bitCount { + return fmt.Errorf("Cannot decode netmask into *net.IP") + } + ips[i] = ipnet.IP + } + *v = ips + case *net.IPNet: + *v = decodeInet(vr) + case *[]net.IPNet: + *v = decodeInetArray(vr) + default: + if v := reflect.ValueOf(d); v.Kind() == reflect.Ptr { + el := v.Elem() + switch el.Kind() { + // if d is a pointer to pointer, strip the pointer and try again + case reflect.Ptr: + // -1 is a null value + if vr.Len() == -1 { + if !el.IsNil() { + // if the destination pointer is not nil, nil it out + el.Set(reflect.Zero(el.Type())) + } + return nil + } + if el.IsNil() { + // allocate destination + el.Set(reflect.New(el.Type().Elem())) + } + d = el.Interface() + return Decode(vr, d) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n := decodeInt(vr) + if el.OverflowInt(n) { + return fmt.Errorf("Scan cannot decode %d into %T", n, d) + } + el.SetInt(n) + return nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + n := decodeInt(vr) + if n < 0 { + return fmt.Errorf("%d is less than zero for %T", n, d) + } + if el.OverflowUint(uint64(n)) { + return fmt.Errorf("Scan cannot decode %d into %T", n, d) + } + el.SetUint(uint64(n)) + return nil + case reflect.String: + el.SetString(decodeText(vr)) + return nil + } + } + return fmt.Errorf("Scan cannot decode into %T", d) + } + + return nil +} + +func decodeBool(vr *ValueReader) bool { + if vr.Len() == -1 { + vr.Fatal(ProtocolError("Cannot decode null into bool")) + return false + } + + if vr.Type().DataType != BoolOid { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into bool", vr.Type().DataType))) + return false + } + + if vr.Type().FormatCode != BinaryFormatCode { + vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode))) + return false + } + + if vr.Len() != 1 { + vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for a bool: %d", vr.Len()))) + return false + } + + b := vr.ReadByte() + return b != 0 +} + +func encodeBool(w *WriteBuf, oid Oid, value bool) error { + if oid != BoolOid { + return fmt.Errorf("cannot encode Go %s into oid %d", "bool", oid) + } + + w.WriteInt32(1) + + var n byte + if value { + n 
= 1 + } + + w.WriteByte(n) + + return nil +} + +func decodeInt(vr *ValueReader) int64 { + switch vr.Type().DataType { + case Int2Oid: + return int64(decodeInt2(vr)) + case Int4Oid: + return int64(decodeInt4(vr)) + case Int8Oid: + return int64(decodeInt8(vr)) + } + + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into any integer type", vr.Type().DataType))) + return 0 +} + +func decodeInt8(vr *ValueReader) int64 { + if vr.Len() == -1 { + vr.Fatal(ProtocolError("Cannot decode null into int64")) + return 0 + } + + if vr.Type().DataType != Int8Oid { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into int8", vr.Type().DataType))) + return 0 + } + + if vr.Type().FormatCode != BinaryFormatCode { + vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode))) + return 0 + } + + if vr.Len() != 8 { + vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an int8: %d", vr.Len()))) + return 0 + } + + return vr.ReadInt64() +} + +func decodeChar(vr *ValueReader) Char { + if vr.Len() == -1 { + vr.Fatal(ProtocolError("Cannot decode null into char")) + return Char(0) + } + + if vr.Type().DataType != CharOid { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into char", vr.Type().DataType))) + return Char(0) + } + + if vr.Type().FormatCode != BinaryFormatCode { + vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode))) + return Char(0) + } + + if vr.Len() != 1 { + vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for a char: %d", vr.Len()))) + return Char(0) + } + + return Char(vr.ReadByte()) +} + +func decodeInt2(vr *ValueReader) int16 { + if vr.Len() == -1 { + vr.Fatal(ProtocolError("Cannot decode null into int16")) + return 0 + } + + if vr.Type().DataType != Int2Oid { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into int16", vr.Type().DataType))) + return 0 + } + + if vr.Type().FormatCode != BinaryFormatCode { + vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode))) + return 0 + } + + if vr.Len() != 2 { + vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an int2: %d", vr.Len()))) + return 0 + } + + return vr.ReadInt16() +} + +func encodeInt(w *WriteBuf, oid Oid, value int) error { + switch oid { + case Int2Oid: + if value < math.MinInt16 { + return fmt.Errorf("%d is less than min pg:int2", value) + } else if value > math.MaxInt16 { + return fmt.Errorf("%d is greater than max pg:int2", value) + } + w.WriteInt32(2) + w.WriteInt16(int16(value)) + case Int4Oid: + if value < math.MinInt32 { + return fmt.Errorf("%d is less than min pg:int4", value) + } else if value > math.MaxInt32 { + return fmt.Errorf("%d is greater than max pg:int4", value) + } + w.WriteInt32(4) + w.WriteInt32(int32(value)) + case Int8Oid: + if int64(value) <= int64(math.MaxInt64) { + w.WriteInt32(8) + w.WriteInt64(int64(value)) + } else { + return fmt.Errorf("%d is larger than max int64 %d", value, int64(math.MaxInt64)) + } + default: + return fmt.Errorf("cannot encode %s into oid %v", "int", oid) + } + + return nil +} + +func encodeUInt(w *WriteBuf, oid Oid, value uint) error { + switch oid { + case Int2Oid: + if value > math.MaxInt16 { + return fmt.Errorf("%d is greater than max pg:int2", value) + } + w.WriteInt32(2) + w.WriteInt16(int16(value)) + case Int4Oid: + if value > math.MaxInt32 { + return fmt.Errorf("%d is greater than max pg:int4", value) + } + w.WriteInt32(4) + w.WriteInt32(int32(value)) + case Int8Oid: + 
+		// Guard against values that do not fit in a signed 64-bit integer.
+		// Comparing in uint64 avoids the conversion wrapping a large uint
+		// to a negative int64 and slipping past the check.
+		if uint64(value) > math.MaxInt64 {
+			return fmt.Errorf("%d is greater than max pg:int8", value)
+		}
+		w.WriteInt32(8)
+		w.WriteInt64(int64(value))
+
+	default:
+		return fmt.Errorf("cannot encode %s into oid %v", "uint", oid)
+	}
+
+	return nil
+}
+
+func encodeChar(w *WriteBuf, oid Oid, value Char) error {
+	w.WriteInt32(1)
+	w.WriteByte(byte(value))
+	return nil
+}
+
+func encodeInt8(w *WriteBuf, oid Oid, value int8) error {
+	switch oid {
+	case Int2Oid:
+		w.WriteInt32(2)
+		w.WriteInt16(int16(value))
+	case Int4Oid:
+		w.WriteInt32(4)
+		w.WriteInt32(int32(value))
+	case Int8Oid:
+		w.WriteInt32(8)
+		w.WriteInt64(int64(value))
+	default:
+		return fmt.Errorf("cannot encode %s into oid %v", "int8", oid)
+	}
+
+	return nil
+}
+
+func encodeUInt8(w *WriteBuf, oid Oid, value uint8) error {
+	switch oid {
+	case Int2Oid:
+		w.WriteInt32(2)
+		w.WriteInt16(int16(value))
+	case Int4Oid:
+		w.WriteInt32(4)
+		w.WriteInt32(int32(value))
+	case Int8Oid:
+		w.WriteInt32(8)
+		w.WriteInt64(int64(value))
+	default:
+		return fmt.Errorf("cannot encode %s into oid %v", "uint8", oid)
+	}
+
+	return nil
+}
+
+func encodeInt16(w *WriteBuf, oid Oid, value int16) error {
+	switch oid {
+	case Int2Oid:
+		w.WriteInt32(2)
+		w.WriteInt16(value)
+	case Int4Oid:
+		w.WriteInt32(4)
+		w.WriteInt32(int32(value))
+	case Int8Oid:
+		w.WriteInt32(8)
+		w.WriteInt64(int64(value))
+	default:
+		return fmt.Errorf("cannot encode %s into oid %v", "int16", oid)
+	}
+
+	return nil
+}
+
+func encodeUInt16(w *WriteBuf, oid Oid, value uint16) error {
+	switch oid {
+	case Int2Oid:
+		if value <= math.MaxInt16 {
+			w.WriteInt32(2)
+			w.WriteInt16(int16(value))
+		} else {
+			return fmt.Errorf("%d is greater than max int16 %d", value, math.MaxInt16)
+		}
+	case Int4Oid:
+		w.WriteInt32(4)
+		w.WriteInt32(int32(value))
+	case Int8Oid:
+		w.WriteInt32(8)
+		w.WriteInt64(int64(value))
+	default:
+		return fmt.Errorf("cannot encode %s into oid %v", "uint16", oid)
+	}
+
+	return nil
+}
+
+func encodeInt32(w *WriteBuf, oid Oid, value int32) error {
+	switch oid {
+	case Int2Oid:
+		if value <= math.MaxInt16 {
+			w.WriteInt32(2)
+			w.WriteInt16(int16(value))
+		} else {
+			return fmt.Errorf("%d is greater than max int16 %d", value, math.MaxInt16)
+		}
+	case Int4Oid:
+		w.WriteInt32(4)
+		w.WriteInt32(value)
+	case Int8Oid:
+		w.WriteInt32(8)
+		w.WriteInt64(int64(value))
+	default:
+		return fmt.Errorf("cannot encode %s into oid %v", "int32", oid)
+	}
+
+	return nil
+}
+
+func encodeUInt32(w *WriteBuf, oid Oid, value uint32) error {
+	switch oid {
+	case Int2Oid:
+		if value <= math.MaxInt16 {
+			w.WriteInt32(2)
+			w.WriteInt16(int16(value))
+		} else {
+			return fmt.Errorf("%d is greater than max int16 %d", value, math.MaxInt16)
+		}
+	case Int4Oid:
+		if value <= math.MaxInt32 {
+			w.WriteInt32(4)
+			w.WriteInt32(int32(value))
+		} else {
+			return fmt.Errorf("%d is greater than max int32 %d", value, math.MaxInt32)
+		}
+	case Int8Oid:
+		w.WriteInt32(8)
+		w.WriteInt64(int64(value))
+	default:
+		return fmt.Errorf("cannot encode %s into oid %v", "uint32", oid)
+	}
+
+	return nil
+}
+
+func encodeInt64(w *WriteBuf, oid Oid, value int64) error {
+	switch oid {
+	case Int2Oid:
+		if value <= math.MaxInt16 {
+			w.WriteInt32(2)
+			w.WriteInt16(int16(value))
+		} else {
+			return fmt.Errorf("%d is greater than max int16 %d", value, math.MaxInt16)
+		}
+	case Int4Oid:
+		if value <= math.MaxInt32 {
+			w.WriteInt32(4)
+			w.WriteInt32(int32(value))
+		} else {
+			return fmt.Errorf("%d is greater than max int32 %d",
value, math.MaxInt32) + } + case Int8Oid: + w.WriteInt32(8) + w.WriteInt64(value) + default: + return fmt.Errorf("cannot encode %s into oid %v", "int64", oid) + } + + return nil +} + +func encodeUInt64(w *WriteBuf, oid Oid, value uint64) error { + switch oid { + case Int2Oid: + if value <= math.MaxInt16 { + w.WriteInt32(2) + w.WriteInt16(int16(value)) + } else { + return fmt.Errorf("%d is greater than max int16 %d", value, math.MaxInt16) + } + case Int4Oid: + if value <= math.MaxInt32 { + w.WriteInt32(4) + w.WriteInt32(int32(value)) + } else { + return fmt.Errorf("%d is greater than max int32 %d", value, math.MaxInt32) + } + case Int8Oid: + + if value <= math.MaxInt64 { + w.WriteInt32(8) + w.WriteInt64(int64(value)) + } else { + return fmt.Errorf("%d is greater than max int64 %d", value, int64(math.MaxInt64)) + } + default: + return fmt.Errorf("cannot encode %s into oid %v", "uint64", oid) + } + + return nil +} + +func decodeInt4(vr *ValueReader) int32 { + if vr.Len() == -1 { + vr.Fatal(ProtocolError("Cannot decode null into int32")) + return 0 + } + + if vr.Type().DataType != Int4Oid { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into int32", vr.Type().DataType))) + return 0 + } + + if vr.Type().FormatCode != BinaryFormatCode { + vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode))) + return 0 + } + + if vr.Len() != 4 { + vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an int4: %d", vr.Len()))) + return 0 + } + + return vr.ReadInt32() +} + +func decodeOid(vr *ValueReader) Oid { + if vr.Len() == -1 { + vr.Fatal(ProtocolError("Cannot decode null into Oid")) + return Oid(0) + } + + if vr.Type().DataType != OidOid { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into pgx.Oid", vr.Type().DataType))) + return Oid(0) + } + + // Oid needs to decode text format because it is used in loadPgTypes + switch vr.Type().FormatCode { + case TextFormatCode: + s := vr.ReadString(vr.Len()) + n, err := strconv.ParseUint(s, 10, 32) + if err != nil { + vr.Fatal(ProtocolError(fmt.Sprintf("Received invalid Oid: %v", s))) + } + return Oid(n) + case BinaryFormatCode: + if vr.Len() != 4 { + vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an Oid: %d", vr.Len()))) + return Oid(0) + } + return Oid(vr.ReadInt32()) + default: + vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode))) + return Oid(0) + } +} + +func encodeOid(w *WriteBuf, oid Oid, value Oid) error { + if oid != OidOid { + return fmt.Errorf("cannot encode Go %s into oid %d", "pgx.Oid", oid) + } + + w.WriteInt32(4) + w.WriteUint32(uint32(value)) + + return nil +} + +func decodeXid(vr *ValueReader) Xid { + if vr.Len() == -1 { + vr.Fatal(ProtocolError("Cannot decode null into Xid")) + return Xid(0) + } + + if vr.Type().DataType != XidOid { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into pgx.Xid", vr.Type().DataType))) + return Xid(0) + } + + // Unlikely Xid will ever go over the wire as text format, but who knows? 
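+	// Xid is PostgreSQL's 32-bit transaction ID, so both the text and the
+	// binary branches below decode into a value that fits in a uint32.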
+ switch vr.Type().FormatCode { + case TextFormatCode: + s := vr.ReadString(vr.Len()) + n, err := strconv.ParseUint(s, 10, 32) + if err != nil { + vr.Fatal(ProtocolError(fmt.Sprintf("Received invalid Oid: %v", s))) + } + return Xid(n) + case BinaryFormatCode: + if vr.Len() != 4 { + vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an Oid: %d", vr.Len()))) + return Xid(0) + } + return Xid(vr.ReadUint32()) + default: + vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode))) + return Xid(0) + } +} + +func encodeXid(w *WriteBuf, oid Oid, value Xid) error { + if oid != XidOid { + return fmt.Errorf("cannot encode Go %s into oid %d", "pgx.Xid", oid) + } + + w.WriteInt32(4) + w.WriteUint32(uint32(value)) + + return nil +} + +func decodeCid(vr *ValueReader) Cid { + if vr.Len() == -1 { + vr.Fatal(ProtocolError("Cannot decode null into Cid")) + return Cid(0) + } + + if vr.Type().DataType != CidOid { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into pgx.Cid", vr.Type().DataType))) + return Cid(0) + } + + // Unlikely Cid will ever go over the wire as text format, but who knows? + switch vr.Type().FormatCode { + case TextFormatCode: + s := vr.ReadString(vr.Len()) + n, err := strconv.ParseUint(s, 10, 32) + if err != nil { + vr.Fatal(ProtocolError(fmt.Sprintf("Received invalid Oid: %v", s))) + } + return Cid(n) + case BinaryFormatCode: + if vr.Len() != 4 { + vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an Oid: %d", vr.Len()))) + return Cid(0) + } + return Cid(vr.ReadUint32()) + default: + vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode))) + return Cid(0) + } +} + +func encodeCid(w *WriteBuf, oid Oid, value Cid) error { + if oid != CidOid { + return fmt.Errorf("cannot encode Go %s into oid %d", "pgx.Cid", oid) + } + + w.WriteInt32(4) + w.WriteUint32(uint32(value)) + + return nil +} + +// Note that we do not match negative numbers, because neither the +// BlockNumber nor OffsetNumber of a Tid can be negative. +var tidRegexp *regexp.Regexp = regexp.MustCompile(`^\((\d*),(\d*)\)$`) + +func decodeTid(vr *ValueReader) Tid { + if vr.Len() == -1 { + vr.Fatal(ProtocolError("Cannot decode null into Tid")) + return Tid{BlockNumber: 0, OffsetNumber: 0} + } + + if vr.Type().DataType != TidOid { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into pgx.Tid", vr.Type().DataType))) + return Tid{BlockNumber: 0, OffsetNumber: 0} + } + + // Unlikely Tid will ever go over the wire as text format, but who knows? 
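+	// A tid value is a physical row locator: a 32-bit block number plus a
+	// 16-bit offset within that block, rendered as "(block,offset)" in text
+	// format. tidRegexp above captures the two numbers as submatches.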
+	switch vr.Type().FormatCode {
+	case TextFormatCode:
+		s := vr.ReadString(vr.Len())
+
+		match := tidRegexp.FindStringSubmatch(s)
+		if match == nil {
+			vr.Fatal(ProtocolError(fmt.Sprintf("Received invalid Tid: %v", s)))
+			return Tid{BlockNumber: 0, OffsetNumber: 0}
+		}
+
+		// Parse the two regexp submatches, not the whole string: match[1]
+		// is the block number (32 bits), match[2] the offset (16 bits).
+		blockNumber, err := strconv.ParseUint(match[1], 10, 32)
+		if err != nil {
+			vr.Fatal(ProtocolError(fmt.Sprintf("Received invalid BlockNumber part of a Tid: %v", s)))
+		}
+
+		offsetNumber, err := strconv.ParseUint(match[2], 10, 16)
+		if err != nil {
+			vr.Fatal(ProtocolError(fmt.Sprintf("Received invalid OffsetNumber part of a Tid: %v", s)))
+		}
+		return Tid{BlockNumber: uint32(blockNumber), OffsetNumber: uint16(offsetNumber)}
+	case BinaryFormatCode:
+		if vr.Len() != 6 {
+			vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for a Tid: %d", vr.Len())))
+			return Tid{BlockNumber: 0, OffsetNumber: 0}
+		}
+		return Tid{BlockNumber: vr.ReadUint32(), OffsetNumber: vr.ReadUint16()}
+	default:
+		vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
+		return Tid{BlockNumber: 0, OffsetNumber: 0}
+	}
+}
+
+func encodeTid(w *WriteBuf, oid Oid, value Tid) error {
+	if oid != TidOid {
+		return fmt.Errorf("cannot encode Go %s into oid %d", "pgx.Tid", oid)
+	}
+
+	w.WriteInt32(6)
+	w.WriteUint32(value.BlockNumber)
+	w.WriteUint16(value.OffsetNumber)
+
+	return nil
+}
+
+func decodeFloat4(vr *ValueReader) float32 {
+	if vr.Len() == -1 {
+		vr.Fatal(ProtocolError("Cannot decode null into float32"))
+		return 0
+	}
+
+	if vr.Type().DataType != Float4Oid {
+		vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into float32", vr.Type().DataType)))
+		return 0
+	}
+
+	if vr.Type().FormatCode != BinaryFormatCode {
+		vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
+		return 0
+	}
+
+	if vr.Len() != 4 {
+		vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for a float4: %d", vr.Len())))
+		return 0
+	}
+
+	i := vr.ReadInt32()
+	return math.Float32frombits(uint32(i))
+}
+
+func encodeFloat32(w *WriteBuf, oid Oid, value float32) error {
+	switch oid {
+	case Float4Oid:
+		w.WriteInt32(4)
+		w.WriteInt32(int32(math.Float32bits(value)))
+	case Float8Oid:
+		w.WriteInt32(8)
+		w.WriteInt64(int64(math.Float64bits(float64(value))))
+	default:
+		return fmt.Errorf("cannot encode %s into oid %v", "float32", oid)
+	}
+
+	return nil
+}
+
+func decodeFloat8(vr *ValueReader) float64 {
+	if vr.Len() == -1 {
+		vr.Fatal(ProtocolError("Cannot decode null into float64"))
+		return 0
+	}
+
+	if vr.Type().DataType != Float8Oid {
+		vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into float64", vr.Type().DataType)))
+		return 0
+	}
+
+	if vr.Type().FormatCode != BinaryFormatCode {
+		vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
+		return 0
+	}
+
+	if vr.Len() != 8 {
+		vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for a float8: %d", vr.Len())))
+		return 0
+	}
+
+	i := vr.ReadInt64()
+	return math.Float64frombits(uint64(i))
+}
+
+func encodeFloat64(w *WriteBuf, oid Oid, value float64) error {
+	switch oid {
+	case Float8Oid:
+		w.WriteInt32(8)
+		w.WriteInt64(int64(math.Float64bits(value)))
+	default:
+		return fmt.Errorf("cannot encode %s into oid %v", "float64", oid)
+	}
+
+	return nil
+}
+
+func decodeText(vr *ValueReader) string {
+	if vr.Len() == -1 {
+		vr.Fatal(ProtocolError("Cannot decode null into string"))
+		return ""
+	}
+
+	return vr.ReadString(vr.Len())
+}
+
+func encodeString(w
*WriteBuf, oid Oid, value string) error { + w.WriteInt32(int32(len(value))) + w.WriteBytes([]byte(value)) + return nil +} + +func decodeBytea(vr *ValueReader) []byte { + if vr.Len() == -1 { + return nil + } + + if vr.Type().DataType != ByteaOid { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []byte", vr.Type().DataType))) + return nil + } + + if vr.Type().FormatCode != BinaryFormatCode { + vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode))) + return nil + } + + return vr.ReadBytes(vr.Len()) +} + +func encodeByteSlice(w *WriteBuf, oid Oid, value []byte) error { + w.WriteInt32(int32(len(value))) + w.WriteBytes(value) + + return nil +} + +func decodeJSON(vr *ValueReader, d interface{}) error { + if vr.Len() == -1 { + return nil + } + + if vr.Type().DataType != JsonOid { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into json", vr.Type().DataType))) + } + + bytes := vr.ReadBytes(vr.Len()) + err := json.Unmarshal(bytes, d) + if err != nil { + vr.Fatal(err) + } + return err +} + +func encodeJSON(w *WriteBuf, oid Oid, value interface{}) error { + if oid != JsonOid { + return fmt.Errorf("cannot encode JSON into oid %v", oid) + } + + s, err := json.Marshal(value) + if err != nil { + return fmt.Errorf("Failed to encode json from type: %T", value) + } + + w.WriteInt32(int32(len(s))) + w.WriteBytes(s) + + return nil +} + +func decodeJSONB(vr *ValueReader, d interface{}) error { + if vr.Len() == -1 { + return nil + } + + if vr.Type().DataType != JsonbOid { + err := ProtocolError(fmt.Sprintf("Cannot decode oid %v into jsonb", vr.Type().DataType)) + vr.Fatal(err) + return err + } + + bytes := vr.ReadBytes(vr.Len()) + if vr.Type().FormatCode == BinaryFormatCode { + if bytes[0] != 1 { + err := ProtocolError(fmt.Sprintf("Unknown jsonb format byte: %x", bytes[0])) + vr.Fatal(err) + return err + } + bytes = bytes[1:] + } + + err := json.Unmarshal(bytes, d) + if err != nil { + vr.Fatal(err) + } + return err +} + +func encodeJSONB(w *WriteBuf, oid Oid, value interface{}) error { + if oid != JsonbOid { + return fmt.Errorf("cannot encode JSON into oid %v", oid) + } + + s, err := json.Marshal(value) + if err != nil { + return fmt.Errorf("Failed to encode json from type: %T", value) + } + + w.WriteInt32(int32(len(s) + 1)) + w.WriteByte(1) // JSONB format header + w.WriteBytes(s) + + return nil +} + +func decodeDate(vr *ValueReader) time.Time { + var zeroTime time.Time + + if vr.Len() == -1 { + vr.Fatal(ProtocolError("Cannot decode null into time.Time")) + return zeroTime + } + + if vr.Type().DataType != DateOid { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into time.Time", vr.Type().DataType))) + return zeroTime + } + + if vr.Type().FormatCode != BinaryFormatCode { + vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode))) + return zeroTime + } + + if vr.Len() != 4 { + vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an date: %d", vr.Len()))) + } + dayOffset := vr.ReadInt32() + return time.Date(2000, 1, int(1+dayOffset), 0, 0, 0, 0, time.Local) +} + +func encodeTime(w *WriteBuf, oid Oid, value time.Time) error { + switch oid { + case DateOid: + tUnix := time.Date(value.Year(), value.Month(), value.Day(), 0, 0, 0, 0, time.UTC).Unix() + dateEpoch := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC).Unix() + + secSinceDateEpoch := tUnix - dateEpoch + daysSinceDateEpoch := secSinceDateEpoch / 86400 + + w.WriteInt32(4) + w.WriteInt32(int32(daysSinceDateEpoch)) + + 
return nil + case TimestampTzOid, TimestampOid: + microsecSinceUnixEpoch := value.Unix()*1000000 + int64(value.Nanosecond())/1000 + microsecSinceY2K := microsecSinceUnixEpoch - microsecFromUnixEpochToY2K + + w.WriteInt32(8) + w.WriteInt64(microsecSinceY2K) + + return nil + default: + return fmt.Errorf("cannot encode %s into oid %v", "time.Time", oid) + } +} + +const microsecFromUnixEpochToY2K = 946684800 * 1000000 + +func decodeTimestampTz(vr *ValueReader) time.Time { + var zeroTime time.Time + + if vr.Len() == -1 { + vr.Fatal(ProtocolError("Cannot decode null into time.Time")) + return zeroTime + } + + if vr.Type().DataType != TimestampTzOid { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into time.Time", vr.Type().DataType))) + return zeroTime + } + + if vr.Type().FormatCode != BinaryFormatCode { + vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode))) + return zeroTime + } + + if vr.Len() != 8 { + vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an timestamptz: %d", vr.Len()))) + return zeroTime + } + + microsecSinceY2K := vr.ReadInt64() + microsecSinceUnixEpoch := microsecFromUnixEpochToY2K + microsecSinceY2K + return time.Unix(microsecSinceUnixEpoch/1000000, (microsecSinceUnixEpoch%1000000)*1000) +} + +func decodeTimestamp(vr *ValueReader) time.Time { + var zeroTime time.Time + + if vr.Len() == -1 { + vr.Fatal(ProtocolError("Cannot decode null into timestamp")) + return zeroTime + } + + if vr.Type().DataType != TimestampOid { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into time.Time", vr.Type().DataType))) + return zeroTime + } + + if vr.Type().FormatCode != BinaryFormatCode { + vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode))) + return zeroTime + } + + if vr.Len() != 8 { + vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an timestamp: %d", vr.Len()))) + return zeroTime + } + + microsecSinceY2K := vr.ReadInt64() + microsecSinceUnixEpoch := microsecFromUnixEpochToY2K + microsecSinceY2K + return time.Unix(microsecSinceUnixEpoch/1000000, (microsecSinceUnixEpoch%1000000)*1000) +} + +func decodeInet(vr *ValueReader) net.IPNet { + var zero net.IPNet + + if vr.Len() == -1 { + vr.Fatal(ProtocolError("Cannot decode null into net.IPNet")) + return zero + } + + if vr.Type().FormatCode != BinaryFormatCode { + vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode))) + return zero + } + + pgType := vr.Type() + if pgType.DataType != InetOid && pgType.DataType != CidrOid { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into %s", pgType.DataType, pgType.Name))) + return zero + } + if vr.Len() != 8 && vr.Len() != 20 { + vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for a %s: %d", pgType.Name, vr.Len()))) + return zero + } + + vr.ReadByte() // ignore family + bits := vr.ReadByte() + vr.ReadByte() // ignore is_cidr + addressLength := vr.ReadByte() + + var ipnet net.IPNet + ipnet.IP = vr.ReadBytes(int32(addressLength)) + ipnet.Mask = net.CIDRMask(int(bits), int(addressLength)*8) + + return ipnet +} + +func encodeIPNet(w *WriteBuf, oid Oid, value net.IPNet) error { + if oid != InetOid && oid != CidrOid { + return fmt.Errorf("cannot encode %s into oid %v", "net.IPNet", oid) + } + + var size int32 + var family byte + switch len(value.IP) { + case net.IPv4len: + size = 8 + family = *w.conn.pgsqlAfInet + case net.IPv6len: + size = 20 + family = *w.conn.pgsqlAfInet6 + default: 
+ return fmt.Errorf("Unexpected IP length: %v", len(value.IP)) + } + + w.WriteInt32(size) + w.WriteByte(family) + ones, _ := value.Mask.Size() + w.WriteByte(byte(ones)) + w.WriteByte(0) // is_cidr is ignored on server + w.WriteByte(byte(len(value.IP))) + w.WriteBytes(value.IP) + + return nil +} + +func encodeIP(w *WriteBuf, oid Oid, value net.IP) error { + if oid != InetOid && oid != CidrOid { + return fmt.Errorf("cannot encode %s into oid %v", "net.IP", oid) + } + + var ipnet net.IPNet + ipnet.IP = value + bitCount := len(value) * 8 + ipnet.Mask = net.CIDRMask(bitCount, bitCount) + return encodeIPNet(w, oid, ipnet) +} + +func decodeRecord(vr *ValueReader) []interface{} { + if vr.Len() == -1 { + return nil + } + + if vr.Type().FormatCode != BinaryFormatCode { + vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode))) + return nil + } + + if vr.Type().DataType != RecordOid { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []interface{}", vr.Type().DataType))) + return nil + } + + valueCount := vr.ReadInt32() + record := make([]interface{}, 0, int(valueCount)) + + for i := int32(0); i < valueCount; i++ { + fd := FieldDescription{FormatCode: BinaryFormatCode} + fieldVR := ValueReader{mr: vr.mr, fd: &fd} + fd.DataType = vr.ReadOid() + fieldVR.valueBytesRemaining = vr.ReadInt32() + vr.valueBytesRemaining -= fieldVR.valueBytesRemaining + + switch fd.DataType { + case BoolOid: + record = append(record, decodeBool(&fieldVR)) + case ByteaOid: + record = append(record, decodeBytea(&fieldVR)) + case Int8Oid: + record = append(record, decodeInt8(&fieldVR)) + case Int2Oid: + record = append(record, decodeInt2(&fieldVR)) + case Int4Oid: + record = append(record, decodeInt4(&fieldVR)) + case OidOid: + record = append(record, decodeOid(&fieldVR)) + case Float4Oid: + record = append(record, decodeFloat4(&fieldVR)) + case Float8Oid: + record = append(record, decodeFloat8(&fieldVR)) + case DateOid: + record = append(record, decodeDate(&fieldVR)) + case TimestampTzOid: + record = append(record, decodeTimestampTz(&fieldVR)) + case TimestampOid: + record = append(record, decodeTimestamp(&fieldVR)) + case InetOid, CidrOid: + record = append(record, decodeInet(&fieldVR)) + case TextOid, VarcharOid, UnknownOid: + record = append(record, decodeText(&fieldVR)) + default: + vr.Fatal(fmt.Errorf("decodeRecord cannot decode oid %d", fd.DataType)) + return nil + } + + // Consume any remaining data + if fieldVR.Len() > 0 { + fieldVR.ReadBytes(fieldVR.Len()) + } + + if fieldVR.Err() != nil { + vr.Fatal(fieldVR.Err()) + return nil + } + } + + return record +} + +func decode1dArrayHeader(vr *ValueReader) (length int32, err error) { + numDims := vr.ReadInt32() + if numDims > 1 { + return 0, ProtocolError(fmt.Sprintf("Expected array to have 0 or 1 dimension, but it had %v", numDims)) + } + + vr.ReadInt32() // 0 if no nulls / 1 if there is one or more nulls -- but we don't care + vr.ReadInt32() // element oid + + if numDims == 0 { + return 0, nil + } + + length = vr.ReadInt32() + + idxFirstElem := vr.ReadInt32() + if idxFirstElem != 1 { + return 0, ProtocolError(fmt.Sprintf("Expected array's first element to start a index 1, but it is %d", idxFirstElem)) + } + + return length, nil +} + +func decodeBoolArray(vr *ValueReader) []bool { + if vr.Len() == -1 { + return nil + } + + if vr.Type().DataType != BoolArrayOid { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []bool", vr.Type().DataType))) + return nil + } + + if vr.Type().FormatCode != 
BinaryFormatCode { + vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode))) + return nil + } + + numElems, err := decode1dArrayHeader(vr) + if err != nil { + vr.Fatal(err) + return nil + } + + a := make([]bool, int(numElems)) + for i := 0; i < len(a); i++ { + elSize := vr.ReadInt32() + switch elSize { + case 1: + if vr.ReadByte() == 1 { + a[i] = true + } + case -1: + vr.Fatal(ProtocolError("Cannot decode null element")) + return nil + default: + vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an bool element: %d", elSize))) + return nil + } + } + + return a +} + +func encodeBoolSlice(w *WriteBuf, oid Oid, slice []bool) error { + if oid != BoolArrayOid { + return fmt.Errorf("cannot encode Go %s into oid %d", "[]bool", oid) + } + + encodeArrayHeader(w, BoolOid, len(slice), 5) + for _, v := range slice { + w.WriteInt32(1) + var b byte + if v { + b = 1 + } + w.WriteByte(b) + } + + return nil +} + +func decodeByteaArray(vr *ValueReader) [][]byte { + if vr.Len() == -1 { + return nil + } + + if vr.Type().DataType != ByteaArrayOid { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into [][]byte", vr.Type().DataType))) + return nil + } + + if vr.Type().FormatCode != BinaryFormatCode { + vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode))) + return nil + } + + numElems, err := decode1dArrayHeader(vr) + if err != nil { + vr.Fatal(err) + return nil + } + + a := make([][]byte, int(numElems)) + for i := 0; i < len(a); i++ { + elSize := vr.ReadInt32() + switch elSize { + case -1: + vr.Fatal(ProtocolError("Cannot decode null element")) + return nil + default: + a[i] = vr.ReadBytes(elSize) + } + } + + return a +} + +func encodeByteSliceSlice(w *WriteBuf, oid Oid, value [][]byte) error { + if oid != ByteaArrayOid { + return fmt.Errorf("cannot encode Go %s into oid %d", "[][]byte", oid) + } + + size := 20 // array header size + for _, el := range value { + size += 4 + len(el) + } + + w.WriteInt32(int32(size)) + + w.WriteInt32(1) // number of dimensions + w.WriteInt32(0) // no nulls + w.WriteInt32(int32(ByteaOid)) // type of elements + w.WriteInt32(int32(len(value))) // number of elements + w.WriteInt32(1) // index of first element + + for _, el := range value { + encodeByteSlice(w, ByteaOid, el) + } + + return nil +} + +func decodeInt2Array(vr *ValueReader) []int16 { + if vr.Len() == -1 { + return nil + } + + if vr.Type().DataType != Int2ArrayOid { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []int16", vr.Type().DataType))) + return nil + } + + if vr.Type().FormatCode != BinaryFormatCode { + vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode))) + return nil + } + + numElems, err := decode1dArrayHeader(vr) + if err != nil { + vr.Fatal(err) + return nil + } + + a := make([]int16, int(numElems)) + for i := 0; i < len(a); i++ { + elSize := vr.ReadInt32() + switch elSize { + case 2: + a[i] = vr.ReadInt16() + case -1: + vr.Fatal(ProtocolError("Cannot decode null element")) + return nil + default: + vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an int2 element: %d", elSize))) + return nil + } + } + + return a +} + +func decodeInt2ArrayToUInt(vr *ValueReader) []uint16 { + if vr.Len() == -1 { + return nil + } + + if vr.Type().DataType != Int2ArrayOid { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []uint16", vr.Type().DataType))) + return nil + } + + if vr.Type().FormatCode != 
BinaryFormatCode { + vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode))) + return nil + } + + numElems, err := decode1dArrayHeader(vr) + if err != nil { + vr.Fatal(err) + return nil + } + + a := make([]uint16, int(numElems)) + for i := 0; i < len(a); i++ { + elSize := vr.ReadInt32() + switch elSize { + case 2: + tmp := vr.ReadInt16() + if tmp < 0 { + vr.Fatal(ProtocolError(fmt.Sprintf("%d is less than zero for uint16", tmp))) + return nil + } + a[i] = uint16(tmp) + case -1: + vr.Fatal(ProtocolError("Cannot decode null element")) + return nil + default: + vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an int2 element: %d", elSize))) + return nil + } + } + + return a +} + +func encodeInt16Slice(w *WriteBuf, oid Oid, slice []int16) error { + if oid != Int2ArrayOid { + return fmt.Errorf("cannot encode Go %s into oid %d", "[]int16", oid) + } + + encodeArrayHeader(w, Int2Oid, len(slice), 6) + for _, v := range slice { + w.WriteInt32(2) + w.WriteInt16(v) + } + + return nil +} + +func encodeUInt16Slice(w *WriteBuf, oid Oid, slice []uint16) error { + if oid != Int2ArrayOid { + return fmt.Errorf("cannot encode Go %s into oid %d", "[]uint16", oid) + } + + encodeArrayHeader(w, Int2Oid, len(slice), 6) + for _, v := range slice { + if v <= math.MaxInt16 { + w.WriteInt32(2) + w.WriteInt16(int16(v)) + } else { + return fmt.Errorf("%d is greater than max smallint %d", v, math.MaxInt16) + } + } + + return nil +} + +func decodeInt4Array(vr *ValueReader) []int32 { + if vr.Len() == -1 { + return nil + } + + if vr.Type().DataType != Int4ArrayOid { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []int32", vr.Type().DataType))) + return nil + } + + if vr.Type().FormatCode != BinaryFormatCode { + vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode))) + return nil + } + + numElems, err := decode1dArrayHeader(vr) + if err != nil { + vr.Fatal(err) + return nil + } + + a := make([]int32, int(numElems)) + for i := 0; i < len(a); i++ { + elSize := vr.ReadInt32() + switch elSize { + case 4: + a[i] = vr.ReadInt32() + case -1: + vr.Fatal(ProtocolError("Cannot decode null element")) + return nil + default: + vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an int4 element: %d", elSize))) + return nil + } + } + + return a +} + +func decodeInt4ArrayToUInt(vr *ValueReader) []uint32 { + if vr.Len() == -1 { + return nil + } + + if vr.Type().DataType != Int4ArrayOid { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []uint32", vr.Type().DataType))) + return nil + } + + if vr.Type().FormatCode != BinaryFormatCode { + vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode))) + return nil + } + + numElems, err := decode1dArrayHeader(vr) + if err != nil { + vr.Fatal(err) + return nil + } + + a := make([]uint32, int(numElems)) + for i := 0; i < len(a); i++ { + elSize := vr.ReadInt32() + switch elSize { + case 4: + tmp := vr.ReadInt32() + if tmp < 0 { + vr.Fatal(ProtocolError(fmt.Sprintf("%d is less than zero for uint32", tmp))) + return nil + } + a[i] = uint32(tmp) + case -1: + vr.Fatal(ProtocolError("Cannot decode null element")) + return nil + default: + vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an int4 element: %d", elSize))) + return nil + } + } + + return a +} + +func encodeInt32Slice(w *WriteBuf, oid Oid, slice []int32) error { + if oid != Int4ArrayOid { + return fmt.Errorf("cannot encode Go %s 
into oid %d", "[]int32", oid) + } + + encodeArrayHeader(w, Int4Oid, len(slice), 8) + for _, v := range slice { + w.WriteInt32(4) + w.WriteInt32(v) + } + + return nil +} + +func encodeUInt32Slice(w *WriteBuf, oid Oid, slice []uint32) error { + if oid != Int4ArrayOid { + return fmt.Errorf("cannot encode Go %s into oid %d", "[]uint32", oid) + } + + encodeArrayHeader(w, Int4Oid, len(slice), 8) + for _, v := range slice { + if v <= math.MaxInt32 { + w.WriteInt32(4) + w.WriteInt32(int32(v)) + } else { + return fmt.Errorf("%d is greater than max integer %d", v, math.MaxInt32) + } + } + + return nil +} + +func decodeInt8Array(vr *ValueReader) []int64 { + if vr.Len() == -1 { + return nil + } + + if vr.Type().DataType != Int8ArrayOid { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []int64", vr.Type().DataType))) + return nil + } + + if vr.Type().FormatCode != BinaryFormatCode { + vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode))) + return nil + } + + numElems, err := decode1dArrayHeader(vr) + if err != nil { + vr.Fatal(err) + return nil + } + + a := make([]int64, int(numElems)) + for i := 0; i < len(a); i++ { + elSize := vr.ReadInt32() + switch elSize { + case 8: + a[i] = vr.ReadInt64() + case -1: + vr.Fatal(ProtocolError("Cannot decode null element")) + return nil + default: + vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an int8 element: %d", elSize))) + return nil + } + } + + return a +} + +func decodeInt8ArrayToUInt(vr *ValueReader) []uint64 { + if vr.Len() == -1 { + return nil + } + + if vr.Type().DataType != Int8ArrayOid { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []uint64", vr.Type().DataType))) + return nil + } + + if vr.Type().FormatCode != BinaryFormatCode { + vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode))) + return nil + } + + numElems, err := decode1dArrayHeader(vr) + if err != nil { + vr.Fatal(err) + return nil + } + + a := make([]uint64, int(numElems)) + for i := 0; i < len(a); i++ { + elSize := vr.ReadInt32() + switch elSize { + case 8: + tmp := vr.ReadInt64() + if tmp < 0 { + vr.Fatal(ProtocolError(fmt.Sprintf("%d is less than zero for uint64", tmp))) + return nil + } + a[i] = uint64(tmp) + case -1: + vr.Fatal(ProtocolError("Cannot decode null element")) + return nil + default: + vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an int8 element: %d", elSize))) + return nil + } + } + + return a +} + +func encodeInt64Slice(w *WriteBuf, oid Oid, slice []int64) error { + if oid != Int8ArrayOid { + return fmt.Errorf("cannot encode Go %s into oid %d", "[]int64", oid) + } + + encodeArrayHeader(w, Int8Oid, len(slice), 12) + for _, v := range slice { + w.WriteInt32(8) + w.WriteInt64(v) + } + + return nil +} + +func encodeUInt64Slice(w *WriteBuf, oid Oid, slice []uint64) error { + if oid != Int8ArrayOid { + return fmt.Errorf("cannot encode Go %s into oid %d", "[]uint64", oid) + } + + encodeArrayHeader(w, Int8Oid, len(slice), 12) + for _, v := range slice { + if v <= math.MaxInt64 { + w.WriteInt32(8) + w.WriteInt64(int64(v)) + } else { + return fmt.Errorf("%d is greater than max bigint %d", v, int64(math.MaxInt64)) + } + } + + return nil +} + +func decodeFloat4Array(vr *ValueReader) []float32 { + if vr.Len() == -1 { + return nil + } + + if vr.Type().DataType != Float4ArrayOid { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []float32", vr.Type().DataType))) + return nil + } + + if 
vr.Type().FormatCode != BinaryFormatCode {
+		vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
+		return nil
+	}
+
+	numElems, err := decode1dArrayHeader(vr)
+	if err != nil {
+		vr.Fatal(err)
+		return nil
+	}
+
+	a := make([]float32, int(numElems))
+	for i := 0; i < len(a); i++ {
+		elSize := vr.ReadInt32()
+		switch elSize {
+		case 4:
+			n := vr.ReadInt32()
+			a[i] = math.Float32frombits(uint32(n))
+		case -1:
+			vr.Fatal(ProtocolError("Cannot decode null element"))
+			return nil
+		default:
+			vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for a float4 element: %d", elSize)))
+			return nil
+		}
+	}
+
+	return a
+}
+
+func encodeFloat32Slice(w *WriteBuf, oid Oid, slice []float32) error {
+	if oid != Float4ArrayOid {
+		return fmt.Errorf("cannot encode Go %s into oid %d", "[]float32", oid)
+	}
+
+	encodeArrayHeader(w, Float4Oid, len(slice), 8)
+	for _, v := range slice {
+		w.WriteInt32(4)
+		w.WriteInt32(int32(math.Float32bits(v)))
+	}
+
+	return nil
+}
+
+func decodeFloat8Array(vr *ValueReader) []float64 {
+	if vr.Len() == -1 {
+		return nil
+	}
+
+	if vr.Type().DataType != Float8ArrayOid {
+		vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []float64", vr.Type().DataType)))
+		return nil
+	}
+
+	if vr.Type().FormatCode != BinaryFormatCode {
+		vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
+		return nil
+	}
+
+	numElems, err := decode1dArrayHeader(vr)
+	if err != nil {
+		vr.Fatal(err)
+		return nil
+	}
+
+	a := make([]float64, int(numElems))
+	for i := 0; i < len(a); i++ {
+		elSize := vr.ReadInt32()
+		switch elSize {
+		case 8:
+			n := vr.ReadInt64()
+			a[i] = math.Float64frombits(uint64(n))
+		case -1:
+			vr.Fatal(ProtocolError("Cannot decode null element"))
+			return nil
+		default:
+			vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for a float8 element: %d", elSize)))
+			return nil
+		}
+	}
+
+	return a
+}
+
+func encodeFloat64Slice(w *WriteBuf, oid Oid, slice []float64) error {
+	if oid != Float8ArrayOid {
+		return fmt.Errorf("cannot encode Go %s into oid %d", "[]float64", oid)
+	}
+
+	encodeArrayHeader(w, Float8Oid, len(slice), 12)
+	for _, v := range slice {
+		w.WriteInt32(8)
+		w.WriteInt64(int64(math.Float64bits(v)))
+	}
+
+	return nil
+}
+
+func decodeTextArray(vr *ValueReader) []string {
+	if vr.Len() == -1 {
+		return nil
+	}
+
+	if vr.Type().DataType != TextArrayOid && vr.Type().DataType != VarcharArrayOid {
+		vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []string", vr.Type().DataType)))
+		return nil
+	}
+
+	if vr.Type().FormatCode != BinaryFormatCode {
+		vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode)))
+		return nil
+	}
+
+	numElems, err := decode1dArrayHeader(vr)
+	if err != nil {
+		vr.Fatal(err)
+		return nil
+	}
+
+	a := make([]string, int(numElems))
+	for i := 0; i < len(a); i++ {
+		elSize := vr.ReadInt32()
+		if elSize == -1 {
+			vr.Fatal(ProtocolError("Cannot decode null element"))
+			return nil
+		}
+
+		a[i] = vr.ReadString(elSize)
+	}
+
+	return a
+}
+
+// escapeAclItem escapes an AclItem before it is added to
+// its aclitem[] string representation. The PostgreSQL aclitem
+// datatype itself can need escapes because it follows the
+// formatting rules of SQL identifiers. Think of this function
+// as escaping the escapes, so that PostgreSQL's array parser
+// will do the right thing.
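+// For example, an aclitem whose role name contains '"' or ',' must have
+// those runes backslash-escaped before being joined into the "{...}"
+// array literal (see needsEscape below for the full set).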
+func escapeAclItem(acl string) (string, error) { + var escapedAclItem bytes.Buffer + reader := strings.NewReader(acl) + for { + rn, _, err := reader.ReadRune() + if err != nil { + if err == io.EOF { + // Here, EOF is an expected end state, not an error. + return escapedAclItem.String(), nil + } + // This error was not expected + return "", err + } + if needsEscape(rn) { + escapedAclItem.WriteRune('\\') + } + escapedAclItem.WriteRune(rn) + } +} + +// needsEscape determines whether or not a rune needs escaping +// before being placed in the textual representation of an +// aclitem[] array. +func needsEscape(rn rune) bool { + return rn == '\\' || rn == ',' || rn == '"' || rn == '}' +} + +// encodeAclItemSlice encodes a slice of AclItems in +// their textual represention for PostgreSQL. +func encodeAclItemSlice(w *WriteBuf, oid Oid, aclitems []AclItem) error { + strs := make([]string, len(aclitems)) + var escapedAclItem string + var err error + for i := range strs { + escapedAclItem, err = escapeAclItem(string(aclitems[i])) + if err != nil { + return err + } + strs[i] = string(escapedAclItem) + } + + var buf bytes.Buffer + buf.WriteRune('{') + buf.WriteString(strings.Join(strs, ",")) + buf.WriteRune('}') + str := buf.String() + w.WriteInt32(int32(len(str))) + w.WriteBytes([]byte(str)) + return nil +} + +// parseAclItemArray parses the textual representation +// of the aclitem[] type. The textual representation is chosen because +// Pg's src/backend/utils/adt/acl.c has only in/out (text) not send/recv (bin). +// See https://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO +// for formatting notes. +func parseAclItemArray(arr string) ([]AclItem, error) { + reader := strings.NewReader(arr) + // Difficult to guess a performant initial capacity for a slice of + // aclitems, but let's go with 5. + aclItems := make([]AclItem, 0, 5) + // A single value + aclItem := AclItem("") + for { + // Grab the first/next/last rune to see if we are dealing with a + // quoted value, an unquoted value, or the end of the string. + rn, _, err := reader.ReadRune() + if err != nil { + if err == io.EOF { + // Here, EOF is an expected end state, not an error. + return aclItems, nil + } + // This error was not expected + return nil, err + } + + if rn == '"' { + // Discard the opening quote of the quoted value. + aclItem, err = parseQuotedAclItem(reader) + } else { + // We have just read the first rune of an unquoted (bare) value; + // put it back so that ParseBareValue can read it. + err := reader.UnreadRune() + if err != nil { + return nil, err + } + aclItem, err = parseBareAclItem(reader) + } + + if err != nil { + if err == io.EOF { + // Here, EOF is an expected end state, not an error.. + aclItems = append(aclItems, aclItem) + return aclItems, nil + } + // This error was not expected. + return nil, err + } + aclItems = append(aclItems, aclItem) + } +} + +// parseBareAclItem parses a bare (unquoted) aclitem from reader +func parseBareAclItem(reader *strings.Reader) (AclItem, error) { + var aclItem bytes.Buffer + for { + rn, _, err := reader.ReadRune() + if err != nil { + // Return the read value in case the error is a harmless io.EOF. + // (io.EOF marks the end of a bare aclitem at the end of a string) + return AclItem(aclItem.String()), err + } + if rn == ',' { + // A comma marks the end of a bare aclitem. 
+ return AclItem(aclItem.String()), nil + } else { + aclItem.WriteRune(rn) + } + } +} + +// parseQuotedAclItem parses an aclitem which is in double quotes from reader +func parseQuotedAclItem(reader *strings.Reader) (AclItem, error) { + var aclItem bytes.Buffer + for { + rn, escaped, err := readPossiblyEscapedRune(reader) + if err != nil { + if err == io.EOF { + // Even when it is the last value, the final rune of + // a quoted aclitem should be the final closing quote, not io.EOF. + return AclItem(""), fmt.Errorf("unexpected end of quoted value") + } + // Return the read aclitem in case the error is a harmless io.EOF, + // which will be determined by the caller. + return AclItem(aclItem.String()), err + } + if !escaped && rn == '"' { + // An unescaped double quote marks the end of a quoted value. + // The next rune should either be a comma or the end of the string. + rn, _, err := reader.ReadRune() + if err != nil { + // Return the read value in case the error is a harmless io.EOF, + // which will be determined by the caller. + return AclItem(aclItem.String()), err + } + if rn != ',' { + return AclItem(""), fmt.Errorf("unexpected rune after quoted value") + } + return AclItem(aclItem.String()), nil + } + aclItem.WriteRune(rn) + } +} + +// Returns the next rune from r, unless it is a backslash; +// in that case, it returns the rune after the backslash. The second +// return value tells us whether or not the rune was +// preceeded by a backslash (escaped). +func readPossiblyEscapedRune(reader *strings.Reader) (rune, bool, error) { + rn, _, err := reader.ReadRune() + if err != nil { + return 0, false, err + } + if rn == '\\' { + // Discard the backslash and read the next rune. + rn, _, err = reader.ReadRune() + if err != nil { + return 0, false, err + } + return rn, true, nil + } + return rn, false, nil +} + +func decodeAclItemArray(vr *ValueReader) []AclItem { + if vr.Len() == -1 { + vr.Fatal(ProtocolError("Cannot decode null into []AclItem")) + return nil + } + + str := vr.ReadString(vr.Len()) + + // Short-circuit empty array. + if str == "{}" { + return []AclItem{} + } + + // Remove the '{' at the front and the '}' at the end, + // so that parseAclItemArray doesn't have to deal with them. 
+ str = str[1 : len(str)-1] + aclItems, err := parseAclItemArray(str) + if err != nil { + vr.Fatal(ProtocolError(err.Error())) + return nil + } + return aclItems +} + +func encodeStringSlice(w *WriteBuf, oid Oid, slice []string) error { + var elOid Oid + switch oid { + case VarcharArrayOid: + elOid = VarcharOid + case TextArrayOid: + elOid = TextOid + default: + return fmt.Errorf("cannot encode Go %s into oid %d", "[]string", oid) + } + + var totalStringSize int + for _, v := range slice { + totalStringSize += len(v) + } + + size := 20 + len(slice)*4 + totalStringSize + w.WriteInt32(int32(size)) + + w.WriteInt32(1) // number of dimensions + w.WriteInt32(0) // no nulls + w.WriteInt32(int32(elOid)) // type of elements + w.WriteInt32(int32(len(slice))) // number of elements + w.WriteInt32(1) // index of first element + + for _, v := range slice { + w.WriteInt32(int32(len(v))) + w.WriteBytes([]byte(v)) + } + + return nil +} + +func decodeTimestampArray(vr *ValueReader) []time.Time { + if vr.Len() == -1 { + return nil + } + + if vr.Type().DataType != TimestampArrayOid && vr.Type().DataType != TimestampTzArrayOid { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []time.Time", vr.Type().DataType))) + return nil + } + + if vr.Type().FormatCode != BinaryFormatCode { + vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode))) + return nil + } + + numElems, err := decode1dArrayHeader(vr) + if err != nil { + vr.Fatal(err) + return nil + } + + a := make([]time.Time, int(numElems)) + for i := 0; i < len(a); i++ { + elSize := vr.ReadInt32() + switch elSize { + case 8: + microsecSinceY2K := vr.ReadInt64() + microsecSinceUnixEpoch := microsecFromUnixEpochToY2K + microsecSinceY2K + a[i] = time.Unix(microsecSinceUnixEpoch/1000000, (microsecSinceUnixEpoch%1000000)*1000) + case -1: + vr.Fatal(ProtocolError("Cannot decode null element")) + return nil + default: + vr.Fatal(ProtocolError(fmt.Sprintf("Received an invalid size for an time.Time element: %d", elSize))) + return nil + } + } + + return a +} + +func encodeTimeSlice(w *WriteBuf, oid Oid, slice []time.Time) error { + var elOid Oid + switch oid { + case TimestampArrayOid: + elOid = TimestampOid + case TimestampTzArrayOid: + elOid = TimestampTzOid + default: + return fmt.Errorf("cannot encode Go %s into oid %d", "[]time.Time", oid) + } + + encodeArrayHeader(w, int(elOid), len(slice), 12) + for _, t := range slice { + w.WriteInt32(8) + microsecSinceUnixEpoch := t.Unix()*1000000 + int64(t.Nanosecond())/1000 + microsecSinceY2K := microsecSinceUnixEpoch - microsecFromUnixEpochToY2K + w.WriteInt64(microsecSinceY2K) + } + + return nil +} + +func decodeInetArray(vr *ValueReader) []net.IPNet { + if vr.Len() == -1 { + return nil + } + + if vr.Type().DataType != InetArrayOid && vr.Type().DataType != CidrArrayOid { + vr.Fatal(ProtocolError(fmt.Sprintf("Cannot decode oid %v into []net.IP", vr.Type().DataType))) + return nil + } + + if vr.Type().FormatCode != BinaryFormatCode { + vr.Fatal(ProtocolError(fmt.Sprintf("Unknown field description format code: %v", vr.Type().FormatCode))) + return nil + } + + numElems, err := decode1dArrayHeader(vr) + if err != nil { + vr.Fatal(err) + return nil + } + + a := make([]net.IPNet, int(numElems)) + for i := 0; i < len(a); i++ { + elSize := vr.ReadInt32() + if elSize == -1 { + vr.Fatal(ProtocolError("Cannot decode null element")) + return nil + } + + vr.ReadByte() // ignore family + bits := vr.ReadByte() + vr.ReadByte() // ignore is_cidr + addressLength := vr.ReadByte() + 
+ var ipnet net.IPNet + ipnet.IP = vr.ReadBytes(int32(addressLength)) + ipnet.Mask = net.CIDRMask(int(bits), int(addressLength)*8) + + a[i] = ipnet + } + + return a +} + +func encodeIPNetSlice(w *WriteBuf, oid Oid, slice []net.IPNet) error { + var elOid Oid + switch oid { + case InetArrayOid: + elOid = InetOid + case CidrArrayOid: + elOid = CidrOid + default: + return fmt.Errorf("cannot encode Go %s into oid %d", "[]net.IPNet", oid) + } + + size := int32(20) // array header size + for _, ipnet := range slice { + size += 4 + 4 + int32(len(ipnet.IP)) // size of element + inet/cidr metadata + IP bytes + } + w.WriteInt32(int32(size)) + + w.WriteInt32(1) // number of dimensions + w.WriteInt32(0) // no nulls + w.WriteInt32(int32(elOid)) // type of elements + w.WriteInt32(int32(len(slice))) // number of elements + w.WriteInt32(1) // index of first element + + for _, ipnet := range slice { + encodeIPNet(w, elOid, ipnet) + } + + return nil +} + +func encodeIPSlice(w *WriteBuf, oid Oid, slice []net.IP) error { + var elOid Oid + switch oid { + case InetArrayOid: + elOid = InetOid + case CidrArrayOid: + elOid = CidrOid + default: + return fmt.Errorf("cannot encode Go %s into oid %d", "[]net.IPNet", oid) + } + + size := int32(20) // array header size + for _, ip := range slice { + size += 4 + 4 + int32(len(ip)) // size of element + inet/cidr metadata + IP bytes + } + w.WriteInt32(int32(size)) + + w.WriteInt32(1) // number of dimensions + w.WriteInt32(0) // no nulls + w.WriteInt32(int32(elOid)) // type of elements + w.WriteInt32(int32(len(slice))) // number of elements + w.WriteInt32(1) // index of first element + + for _, ip := range slice { + encodeIP(w, elOid, ip) + } + + return nil +} + +func encodeArrayHeader(w *WriteBuf, oid, length, sizePerItem int) { + w.WriteInt32(int32(20 + length*sizePerItem)) + w.WriteInt32(1) // number of dimensions + w.WriteInt32(0) // no nulls + w.WriteInt32(int32(oid)) // type of elements + w.WriteInt32(int32(length)) // number of elements + w.WriteInt32(1) // index of first element +} diff --git a/vendor/github.com/kennygrant/sanitize/.gitignore b/vendor/github.com/kennygrant/sanitize/.gitignore new file mode 100644 index 0000000..0026861 --- /dev/null +++ b/vendor/github.com/kennygrant/sanitize/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/kennygrant/sanitize/.travis.yml b/vendor/github.com/kennygrant/sanitize/.travis.yml new file mode 100644 index 0000000..4f2ee4d --- /dev/null +++ b/vendor/github.com/kennygrant/sanitize/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/kennygrant/sanitize/LICENSE b/vendor/github.com/kennygrant/sanitize/LICENSE new file mode 100644 index 0000000..749ebb2 --- /dev/null +++ b/vendor/github.com/kennygrant/sanitize/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2017 Mechanism Design. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/github.com/kennygrant/sanitize/README.md b/vendor/github.com/kennygrant/sanitize/README.md new file mode 100644 index 0000000..4401ef7 --- /dev/null +++ b/vendor/github.com/kennygrant/sanitize/README.md @@ -0,0 +1,62 @@ +sanitize [![GoDoc](https://godoc.org/github.com/kennygrant/sanitize?status.svg)](https://godoc.org/github.com/kennygrant/sanitize) [![Go Report Card](https://goreportcard.com/badge/github.com/kennygrant/sanitize)](https://goreportcard.com/report/github.com/kennygrant/sanitize) [![CircleCI](https://circleci.com/gh/kennygrant/sanitize.svg?style=svg)](https://circleci.com/gh/kennygrant/sanitize) +======== + +Package sanitize provides functions to sanitize html and paths with go (golang). + +FUNCTIONS + + +```go +sanitize.Accents(s string) string +``` + +Accents replaces a set of accented characters with ascii equivalents. + +```go +sanitize.BaseName(s string) string +``` + +BaseName makes a string safe to use in a file name, producing a sanitized basename replacing . or / with -. Unlike Name no attempt is made to normalise text as a path. + +```go +sanitize.HTML(s string) string +``` + +HTML strips html tags with a very simple parser, replace common entities, and escape < and > in the result. The result is intended to be used as plain text. + +```go +sanitize.HTMLAllowing(s string, args...[]string) (string, error) +``` + +HTMLAllowing parses html and allow certain tags and attributes from the lists optionally specified by args - args[0] is a list of allowed tags, args[1] is a list of allowed attributes. If either is missing default sets are used. + +```go +sanitize.Name(s string) string +``` + +Name makes a string safe to use in a file name by first finding the path basename, then replacing non-ascii characters. + +```go +sanitize.Path(s string) string +``` + +Path makes a string safe to use as an url path. + + +Changes +------- + +Version 1.2 + +Adjusted HTML function to avoid linter warning +Added more tests from https://githubengineering.com/githubs-post-csp-journey/ +Chnaged name of license file +Added badges and change log to readme + +Version 1.1 +Fixed type in comments. 
+Merge pull request from Povilas Balzaravicius Pawka + - replace br tags with newline even when they contain a space + +Version 1.0 +First release \ No newline at end of file diff --git a/vendor/github.com/kennygrant/sanitize/sanitize.go b/vendor/github.com/kennygrant/sanitize/sanitize.go new file mode 100644 index 0000000..2932209 --- /dev/null +++ b/vendor/github.com/kennygrant/sanitize/sanitize.go @@ -0,0 +1,388 @@ +// Package sanitize provides functions for sanitizing text. +package sanitize + +import ( + "bytes" + "html" + "html/template" + "io" + "path" + "regexp" + "strings" + + parser "golang.org/x/net/html" +) + +var ( + ignoreTags = []string{"title", "script", "style", "iframe", "frame", "frameset", "noframes", "noembed", "embed", "applet", "object", "base"} + + defaultTags = []string{"h1", "h2", "h3", "h4", "h5", "h6", "div", "span", "hr", "p", "br", "b", "i", "strong", "em", "ol", "ul", "li", "a", "img", "pre", "code", "blockquote", "article", "section"} + + defaultAttributes = []string{"id", "class", "src", "href", "title", "alt", "name", "rel"} +) + +// HTMLAllowing sanitizes html, allowing some tags. +// Arrays of allowed tags and allowed attributes may optionally be passed as the second and third arguments. +func HTMLAllowing(s string, args ...[]string) (string, error) { + + allowedTags := defaultTags + if len(args) > 0 { + allowedTags = args[0] + } + allowedAttributes := defaultAttributes + if len(args) > 1 { + allowedAttributes = args[1] + } + + // Parse the html + tokenizer := parser.NewTokenizer(strings.NewReader(s)) + + buffer := bytes.NewBufferString("") + ignore := "" + + for { + tokenType := tokenizer.Next() + token := tokenizer.Token() + + switch tokenType { + + case parser.ErrorToken: + err := tokenizer.Err() + if err == io.EOF { + return buffer.String(), nil + } + return "", err + + case parser.StartTagToken: + + if len(ignore) == 0 && includes(allowedTags, token.Data) { + token.Attr = cleanAttributes(token.Attr, allowedAttributes) + buffer.WriteString(token.String()) + } else if includes(ignoreTags, token.Data) { + ignore = token.Data + } + + case parser.SelfClosingTagToken: + + if len(ignore) == 0 && includes(allowedTags, token.Data) { + token.Attr = cleanAttributes(token.Attr, allowedAttributes) + buffer.WriteString(token.String()) + } else if token.Data == ignore { + ignore = "" + } + + case parser.EndTagToken: + if len(ignore) == 0 && includes(allowedTags, token.Data) { + token.Attr = []parser.Attribute{} + buffer.WriteString(token.String()) + } else if token.Data == ignore { + ignore = "" + } + + case parser.TextToken: + // We allow text content through, unless ignoring this entire tag and its contents (including other tags) + if ignore == "" { + buffer.WriteString(token.String()) + } + case parser.CommentToken: + // We ignore comments by default + case parser.DoctypeToken: + // We ignore doctypes by default - html5 does not require them and this is intended for sanitizing snippets of text + default: + // We ignore unknown token types by default + + } + + } + +} + +// HTML strips html tags, replace common entities, and escapes <>&;'" in the result. +// Note the returned text may contain entities as it is escaped by HTMLEscapeString, and most entities are not translated. +func HTML(s string) (output string) { + + // Shortcut strings with no tags in them + if !strings.ContainsAny(s, "<>") { + output = s + } else { + + // First remove line breaks etc as these have no meaning outside html tags (except pre) + // this means pre sections will lose formatting... 
+		s = strings.Replace(s, "\n", "", -1)
+
+		// Then replace line breaks with newlines, to preserve that formatting
+		s = strings.Replace(s, "</p>", "\n", -1)
+		s = strings.Replace(s, "<br>", "\n", -1)
+		s = strings.Replace(s, "</br>", "\n", -1)
+		s = strings.Replace(s, "<br/>", "\n", -1)
+		s = strings.Replace(s, "<br />", "\n", -1)
+
+		// Walk through the string removing all tags
+		b := bytes.NewBufferString("")
+		inTag := false
+		for _, r := range s {
+			switch r {
+			case '<':
+				inTag = true
+			case '>':
+				inTag = false
+			default:
+				if !inTag {
+					b.WriteRune(r)
+				}
+			}
+		}
+		output = b.String()
+	}
+
+	// Remove a few common harmless entities, to arrive at something more like plain text
+	output = strings.Replace(output, "&#8216;", "'", -1)
+	output = strings.Replace(output, "&#8217;", "'", -1)
+	output = strings.Replace(output, "&#8220;", "\"", -1)
+	output = strings.Replace(output, "&#8221;", "\"", -1)
+	output = strings.Replace(output, "&nbsp;", " ", -1)
+	output = strings.Replace(output, "&quot;", "\"", -1)
+	output = strings.Replace(output, "&apos;", "'", -1)
+
+	// Translate some entities into their plain text equivalent (for example accents, if encoded as entities)
+	output = html.UnescapeString(output)
+
+	// In case we have missed any tags above, escape the text - removes <, >, &, ' and ".
+	output = template.HTMLEscapeString(output)
+
+	// After processing, remove some harmless entities &, ' and " which are encoded by HTMLEscapeString
+	output = strings.Replace(output, "&#34;", "\"", -1)
+	output = strings.Replace(output, "&#39;", "'", -1)
+	output = strings.Replace(output, "&amp; ", "& ", -1)     // NB space after
+	output = strings.Replace(output, "&amp;amp; ", "& ", -1) // NB space after
+
+	return output
+}
+
+// We are very restrictive as this is intended for ascii url slugs
+var illegalPath = regexp.MustCompile(`[^[:alnum:]\~\-\./]`)
+
+// Path makes a string safe to use as a URL path,
+// removing accents and replacing separators with -.
+// The path may still start at / and is not intended
+// for use as a file system path without prefix.
+func Path(s string) string {
+	// Start with lowercase string
+	filePath := strings.ToLower(s)
+	filePath = strings.Replace(filePath, "..", "", -1)
+	filePath = path.Clean(filePath)
+
+	// Remove illegal characters for paths, flattening accents
+	// and replacing some common separators with -
+	filePath = cleanString(filePath, illegalPath)
+
+	// NB this may be of length 0, caller must check
+	return filePath
+}
+
+// Remove all other unrecognised characters apart from
+var illegalName = regexp.MustCompile(`[^[:alnum:]-.]`)
+
+// Name makes a string safe to use in a file name by first finding the path basename, then replacing non-ascii characters.
+func Name(s string) string {
+	// Start with lowercase string
+	fileName := strings.ToLower(s)
+	fileName = path.Clean(path.Base(fileName))
+
+	// Remove illegal characters for names, replacing some common separators with -
+	fileName = cleanString(fileName, illegalName)
+
+	// NB this may be of length 0, caller must check
+	return fileName
+}
+
+// Replace these separators with -
+var baseNameSeparators = regexp.MustCompile(`[./]`)
+
+// BaseName makes a string safe to use in a file name, producing a sanitized basename replacing . or / with -.
+// No attempt is made to normalise a path or normalise case.
+func BaseName(s string) string {
+
+	// Replace certain joining characters with a dash
+	baseName := baseNameSeparators.ReplaceAllString(s, "-")
+
+	// Remove illegal characters for names, replacing some common separators with -
+	baseName = cleanString(baseName, illegalName)
+
+	// NB this may be of length 0, caller must check
+	return baseName
+}
+
+// A very limited list of transliterations to catch common european names translated to urls.
+// This set could be expanded with at least caps and many more characters.
+var transliterations = map[rune]string{ + 'À': "A", + 'Á': "A", + 'Â': "A", + 'Ã': "A", + 'Ä': "A", + 'Å': "AA", + 'Æ': "AE", + 'Ç': "C", + 'È': "E", + 'É': "E", + 'Ê': "E", + 'Ë': "E", + 'Ì': "I", + 'Í': "I", + 'Î': "I", + 'Ï': "I", + 'Ð': "D", + 'Ł': "L", + 'Ñ': "N", + 'Ò': "O", + 'Ó': "O", + 'Ô': "O", + 'Õ': "O", + 'Ö': "OE", + 'Ø': "OE", + 'Œ': "OE", + 'Ù': "U", + 'Ú': "U", + 'Ü': "UE", + 'Û': "U", + 'Ý': "Y", + 'Þ': "TH", + 'ẞ': "SS", + 'à': "a", + 'á': "a", + 'â': "a", + 'ã': "a", + 'ä': "ae", + 'å': "aa", + 'æ': "ae", + 'ç': "c", + 'è': "e", + 'é': "e", + 'ê': "e", + 'ë': "e", + 'ì': "i", + 'í': "i", + 'î': "i", + 'ï': "i", + 'ð': "d", + 'ł': "l", + 'ñ': "n", + 'ń': "n", + 'ò': "o", + 'ó': "o", + 'ô': "o", + 'õ': "o", + 'ō': "o", + 'ö': "oe", + 'ø': "oe", + 'œ': "oe", + 'ś': "s", + 'ù': "u", + 'ú': "u", + 'û': "u", + 'ū': "u", + 'ü': "ue", + 'ý': "y", + 'ÿ': "y", + 'ż': "z", + 'þ': "th", + 'ß': "ss", +} + +// Accents replaces a set of accented characters with ascii equivalents. +func Accents(s string) string { + // Replace some common accent characters + b := bytes.NewBufferString("") + for _, c := range s { + // Check transliterations first + if val, ok := transliterations[c]; ok { + b.WriteString(val) + } else { + b.WriteRune(c) + } + } + return b.String() +} + +var ( + // If the attribute contains data: or javascript: anywhere, ignore it + // we don't allow this in attributes as it is so frequently used for xss + // NB we allow spaces in the value, and lowercase. + illegalAttr = regexp.MustCompile(`(d\s*a\s*t\s*a|j\s*a\s*v\s*a\s*s\s*c\s*r\s*i\s*p\s*t\s*)\s*:`) + + // We are far more restrictive with href attributes. + legalHrefAttr = regexp.MustCompile(`\A[/#][^/\\]?|mailto:|http://|https://`) +) + +// cleanAttributes returns an array of attributes after removing malicious ones. +func cleanAttributes(a []parser.Attribute, allowed []string) []parser.Attribute { + if len(a) == 0 { + return a + } + + var cleaned []parser.Attribute + for _, attr := range a { + if includes(allowed, attr.Key) { + + val := strings.ToLower(attr.Val) + + // Check for illegal attribute values + if illegalAttr.FindString(val) != "" { + attr.Val = "" + } + + // Check for legal href values - / mailto:// http:// or https:// + if attr.Key == "href" { + if legalHrefAttr.FindString(val) == "" { + attr.Val = "" + } + } + + // If we still have an attribute, append it to the array + if attr.Val != "" { + cleaned = append(cleaned, attr) + } + } + } + return cleaned +} + +// A list of characters we consider separators in normal strings and replace with our canonical separator - rather than removing. +var ( + separators = regexp.MustCompile(`[ &_=+:]`) + + dashes = regexp.MustCompile(`[\-]+`) +) + +// cleanString replaces separators with - and removes characters listed in the regexp provided from string. +// Accents, spaces, and all characters not in A-Za-z0-9 are replaced. +func cleanString(s string, r *regexp.Regexp) string { + + // Remove any trailing space to avoid ending on - + s = strings.Trim(s, " ") + + // Flatten accents first so that if we remove non-ascii we still get a legible name + s = Accents(s) + + // Replace certain joining characters with a dash + s = separators.ReplaceAllString(s, "-") + + // Remove all other unrecognised characters - NB we do allow any printable characters + s = r.ReplaceAllString(s, "") + + // Remove any multiple dashes caused by replacements above + s = dashes.ReplaceAllString(s, "-") + + return s +} + +// includes checks for inclusion of a string in a []string. 
+func includes(a []string, s string) bool {
+	for _, as := range a {
+		if as == s {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE b/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
new file mode 100644
index 0000000..14127cd
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
@@ -0,0 +1,9 @@
+(The MIT License)
+
+Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
new file mode 100644
index 0000000..09a4a35
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
@@ -0,0 +1,42 @@
+# Windows Terminal Sequences
+
+This library allows enabling Windows terminal color support for Go.
+
+See [Console Virtual Terminal Sequences](https://docs.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences) for details.
+
+## Usage
+
+```go
+import (
+	"syscall"
+
+	sequences "github.com/konsorten/go-windows-terminal-sequences"
+)
+
+func main() {
+	sequences.EnableVirtualTerminalProcessing(syscall.Stdout, true)
+}
+
+```
+
+## Authors
+
+The tool is sponsored by the [marvin + konsorten GmbH](http://www.konsorten.de).
+
+We thank all the authors who provided code to this library:
+
+* Felix Kollmann
+* Nicolas Perraut
+* @dirty49374
+
+## License
+
+(The MIT License)
+
+Copyright (c) 2018 marvin + konsorten GmbH (open-source@konsorten.de)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod new file mode 100644 index 0000000..716c613 --- /dev/null +++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod @@ -0,0 +1 @@ +module github.com/konsorten/go-windows-terminal-sequences diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go new file mode 100644 index 0000000..57f530a --- /dev/null +++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go @@ -0,0 +1,35 @@ +// +build windows + +package sequences + +import ( + "syscall" +) + +var ( + kernel32Dll *syscall.LazyDLL = syscall.NewLazyDLL("Kernel32.dll") + setConsoleMode *syscall.LazyProc = kernel32Dll.NewProc("SetConsoleMode") +) + +func EnableVirtualTerminalProcessing(stream syscall.Handle, enable bool) error { + const ENABLE_VIRTUAL_TERMINAL_PROCESSING uint32 = 0x4 + + var mode uint32 + err := syscall.GetConsoleMode(syscall.Stdout, &mode) + if err != nil { + return err + } + + if enable { + mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING + } else { + mode &^= ENABLE_VIRTUAL_TERMINAL_PROCESSING + } + + ret, _, err := setConsoleMode.Call(uintptr(stream), uintptr(mode)) + if ret == 0 { + return err + } + + return nil +} diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go new file mode 100644 index 0000000..df61a6f --- /dev/null +++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go @@ -0,0 +1,11 @@ +// +build linux darwin + +package sequences + +import ( + "fmt" +) + +func EnableVirtualTerminalProcessing(stream uintptr, enable bool) error { + return fmt.Errorf("windows only package") +} diff --git a/vendor/github.com/leodido/go-urn/.gitignore b/vendor/github.com/leodido/go-urn/.gitignore new file mode 100644 index 0000000..a30b5ab --- /dev/null +++ b/vendor/github.com/leodido/go-urn/.gitignore @@ -0,0 +1,9 @@ +*.exe +*.dll +*.so +*.dylib + +*.test + +*.out +*.txt \ No newline at end of file diff --git a/vendor/github.com/leodido/go-urn/.travis.yml b/vendor/github.com/leodido/go-urn/.travis.yml new file mode 100644 index 0000000..913b641 --- /dev/null +++ b/vendor/github.com/leodido/go-urn/.travis.yml @@ -0,0 +1,15 @@ +language: go + +go: + - 1.9.x + - 1.10.x + - tip + +before_install: + - go get -t -v ./... 
+
+script:
+  - go test -race -coverprofile=coverage.txt -covermode=atomic
+
+after_success:
+  - bash <(curl -s https://codecov.io/bash)
\ No newline at end of file
diff --git a/vendor/github.com/leodido/go-urn/README.md b/vendor/github.com/leodido/go-urn/README.md
new file mode 100644
index 0000000..cc902ec
--- /dev/null
+++ b/vendor/github.com/leodido/go-urn/README.md
@@ -0,0 +1,55 @@
+[![Build](https://img.shields.io/travis/leodido/go-urn/master.svg?style=for-the-badge)](https://travis-ci.org/leodido/go-urn) [![Coverage](https://img.shields.io/codecov/c/github/leodido/go-urn.svg?style=for-the-badge)](https://codecov.io/gh/leodido/go-urn) [![Documentation](https://img.shields.io/badge/godoc-reference-blue.svg?style=for-the-badge)](https://godoc.org/github.com/leodido/go-urn)
+
+**A parser for URNs**.
+
+> As defined in [RFC 2141](https://tools.ietf.org/html/rfc2141#ref-1).
+
+[API documentation](https://godoc.org/github.com/leodido/go-urn).
+
+## Installation
+
+```
+go get github.com/leodido/go-urn
+```
+
+## Performance
+
+This implementation is really fast, usually parsing in under half a microsecond on my machine<sup>[1](#mymachine)</sup>.
+
+Note that, while parsing, it also performs:
+
+1. fine-grained and informative erroring
+2. specific-string normalization
+
+```
+ok/00/urn:a:b______________________________________/-4 20000000 265 ns/op 182 B/op 6 allocs/op
+ok/01/URN:foo:a123,456_____________________________/-4 30000000 296 ns/op 200 B/op 6 allocs/op
+ok/02/urn:foo:a123%2c456___________________________/-4 20000000 331 ns/op 208 B/op 6 allocs/op
+ok/03/urn:ietf:params:scim:schemas:core:2.0:User___/-4 20000000 430 ns/op 280 B/op 6 allocs/op
+ok/04/urn:ietf:params:scim:schemas:extension:enterp/-4 20000000 411 ns/op 312 B/op 6 allocs/op
+ok/05/urn:ietf:params:scim:schemas:extension:enterp/-4 20000000 472 ns/op 344 B/op 6 allocs/op
+ok/06/urn:burnout:nss______________________________/-4 30000000 257 ns/op 192 B/op 6 allocs/op
+ok/07/urn:abcdefghilmnopqrstuvzabcdefghilm:x_______/-4 20000000 375 ns/op 213 B/op 6 allocs/op
+ok/08/urn:urnurnurn:urn____________________________/-4 30000000 265 ns/op 197 B/op 6 allocs/op
+ok/09/urn:ciao:@!=%2c(xyz)+a,b.*@g=$_'_____________/-4 20000000 307 ns/op 248 B/op 6 allocs/op
+ok/10/URN:x:abc%1dz%2f%3az_________________________/-4 30000000 259 ns/op 212 B/op 6 allocs/op
+no/11/URN:-xxx:x___________________________________/-4 20000000 445 ns/op 320 B/op 6 allocs/op
+no/12/urn::colon:nss_______________________________/-4 20000000 461 ns/op 320 B/op 6 allocs/op
+no/13/urn:abcdefghilmnopqrstuvzabcdefghilmn:specifi/-4 10000000 660 ns/op 320 B/op 6 allocs/op
+no/14/URN:a!?:x____________________________________/-4 20000000 507 ns/op 320 B/op 6 allocs/op
+no/15/urn:urn:NSS__________________________________/-4 20000000 429 ns/op 288 B/op 6 allocs/op
+no/16/urn:white_space:NSS__________________________/-4 20000000 482 ns/op 320 B/op 6 allocs/op
+no/17/urn:concat:no_spaces_________________________/-4 20000000 539 ns/op 328 B/op 7 allocs/op
+no/18/urn:a:/______________________________________/-4 20000000 470 ns/op 320 B/op 7 allocs/op
+no/19/urn:UrN:NSS__________________________________/-4 20000000 399 ns/op 288 B/op 6 allocs/op
+```
+
+---
+
+* <a name="mymachine">[1]</a>: Intel Core i7-7600U CPU @ 2.80GHz
+
+---
+
+[![Analytics](https://ga-beacon.appspot.com/UA-49657176-1/go-urn?flat)](https://github.com/igrigorik/ga-beacon)
\ No newline at end of file
diff --git a/vendor/github.com/leodido/go-urn/machine.go b/vendor/github.com/leodido/go-urn/machine.go
new file mode 100644
index 0000000..d621ea6
---
/dev/null +++ b/vendor/github.com/leodido/go-urn/machine.go @@ -0,0 +1,1670 @@ +package urn + +import ( + "fmt" +) + +var ( + errPrefix = "expecting the prefix to be the \"urn\" string (whatever case) [col %d]" + errIdentifier = "expecting the identifier to be string (1..31 alnum chars, also containing dashes but not at its start) [col %d]" + errSpecificString = "expecting the specific string to be a string containing alnum, hex, or others ([()+,-.:=@;$_!*']) chars [col %d]" + errNoUrnWithinID = "expecting the identifier to not contain the \"urn\" reserved string [col %d]" + errHex = "expecting the specific string hex chars to be well-formed (%%alnum{2}) [col %d]" + errParse = "parsing error [col %d]" +) + + +const start int = 1 +const first_final int = 44 + +const en_fail int = 46 +const en_main int = 1 + + +// Machine is the interface representing the FSM +type Machine interface { + Error() error + Parse(input []byte) (*URN, error) +} + +type machine struct { + data []byte + cs int + p, pe, eof, pb int + err error + tolower []int +} + +// NewMachine creates a new FSM able to parse RFC 2141 strings. +func NewMachine() Machine { + m := &machine{} + + return m +} + +// Err returns the error that occurred on the last call to Parse. +// +// If the result is nil, then the line was parsed successfully. +func (m *machine) Error() error { + return m.err +} + +func (m *machine) text() []byte { + return m.data[m.pb:m.p] +} + +// Parse parses the input byte array as a RFC 2141 string. +func (m *machine) Parse(input []byte) (*URN, error) { + m.data = input + m.p = 0 + m.pb = 0 + m.pe = len(input) + m.eof = len(input) + m.err = nil + m.tolower = []int{} + output := &URN{} + + { + m.cs = start + } + + + { + if (m.p) == (m.pe) { + goto _test_eof + } + switch m.cs { + case 1: + goto st_case_1 + case 0: + goto st_case_0 + case 2: + goto st_case_2 + case 3: + goto st_case_3 + case 4: + goto st_case_4 + case 5: + goto st_case_5 + case 6: + goto st_case_6 + case 7: + goto st_case_7 + case 8: + goto st_case_8 + case 9: + goto st_case_9 + case 10: + goto st_case_10 + case 11: + goto st_case_11 + case 12: + goto st_case_12 + case 13: + goto st_case_13 + case 14: + goto st_case_14 + case 15: + goto st_case_15 + case 16: + goto st_case_16 + case 17: + goto st_case_17 + case 18: + goto st_case_18 + case 19: + goto st_case_19 + case 20: + goto st_case_20 + case 21: + goto st_case_21 + case 22: + goto st_case_22 + case 23: + goto st_case_23 + case 24: + goto st_case_24 + case 25: + goto st_case_25 + case 26: + goto st_case_26 + case 27: + goto st_case_27 + case 28: + goto st_case_28 + case 29: + goto st_case_29 + case 30: + goto st_case_30 + case 31: + goto st_case_31 + case 32: + goto st_case_32 + case 33: + goto st_case_33 + case 34: + goto st_case_34 + case 35: + goto st_case_35 + case 36: + goto st_case_36 + case 37: + goto st_case_37 + case 38: + goto st_case_38 + case 44: + goto st_case_44 + case 39: + goto st_case_39 + case 40: + goto st_case_40 + case 45: + goto st_case_45 + case 41: + goto st_case_41 + case 42: + goto st_case_42 + case 43: + goto st_case_43 + case 46: + goto st_case_46 + } + goto st_out + st_case_1: + switch (m.data)[(m.p)] { + case 85: + goto tr1 + case 117: + goto tr1 + } + goto tr0 + tr0: + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + goto st0 + tr3: + m.err = fmt.Errorf(errPrefix, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + goto st0 + tr6: + m.err = fmt.Errorf(errIdentifier, m.p) + (m.p)-- + + { 
+ goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + goto st0 + tr41: + m.err = fmt.Errorf(errSpecificString, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + goto st0 + tr44: + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errSpecificString, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + goto st0 + tr50: + m.err = fmt.Errorf(errPrefix, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errIdentifier, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + goto st0 + tr52: + m.err = fmt.Errorf(errNoUrnWithinID, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errIdentifier, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + goto st0 + st_case_0: + st0: + m.cs = 0 + goto _out + tr1: + m.pb = m.p + + goto st2 + st2: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof2 + } + st_case_2: + switch (m.data)[(m.p)] { + case 82: + goto st3 + case 114: + goto st3 + } + goto tr0 + st3: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof3 + } + st_case_3: + switch (m.data)[(m.p)] { + case 78: + goto st4 + case 110: + goto st4 + } + goto tr3 + st4: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof4 + } + st_case_4: + if (m.data)[(m.p)] == 58 { + goto tr5 + } + goto tr0 + tr5: + output.prefix = string(m.text()) + + goto st5 + st5: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof5 + } + st_case_5: + switch (m.data)[(m.p)] { + case 85: + goto tr8 + case 117: + goto tr8 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto tr7 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr7 + } + default: + goto tr7 + } + goto tr6 + tr7: + m.pb = m.p + + goto st6 + st6: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof6 + } + st_case_6: + switch (m.data)[(m.p)] { + case 45: + goto st7 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st7 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st7 + } + default: + goto st7 + } + goto tr6 + st7: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof7 + } + st_case_7: + switch (m.data)[(m.p)] { + case 45: + goto st8 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st8 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st8 + } + default: + goto st8 + } + goto tr6 + st8: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof8 + } + st_case_8: + switch (m.data)[(m.p)] { + case 45: + goto st9 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st9 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st9 + } + default: + goto st9 + } + goto tr6 + st9: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof9 + } + st_case_9: + switch (m.data)[(m.p)] { + case 45: + goto st10 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st10 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st10 + } + default: + goto 
st10 + } + goto tr6 + st10: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof10 + } + st_case_10: + switch (m.data)[(m.p)] { + case 45: + goto st11 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st11 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st11 + } + default: + goto st11 + } + goto tr6 + st11: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof11 + } + st_case_11: + switch (m.data)[(m.p)] { + case 45: + goto st12 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st12 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st12 + } + default: + goto st12 + } + goto tr6 + st12: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof12 + } + st_case_12: + switch (m.data)[(m.p)] { + case 45: + goto st13 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st13 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st13 + } + default: + goto st13 + } + goto tr6 + st13: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof13 + } + st_case_13: + switch (m.data)[(m.p)] { + case 45: + goto st14 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st14 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st14 + } + default: + goto st14 + } + goto tr6 + st14: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof14 + } + st_case_14: + switch (m.data)[(m.p)] { + case 45: + goto st15 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st15 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st15 + } + default: + goto st15 + } + goto tr6 + st15: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof15 + } + st_case_15: + switch (m.data)[(m.p)] { + case 45: + goto st16 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st16 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st16 + } + default: + goto st16 + } + goto tr6 + st16: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof16 + } + st_case_16: + switch (m.data)[(m.p)] { + case 45: + goto st17 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st17 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st17 + } + default: + goto st17 + } + goto tr6 + st17: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof17 + } + st_case_17: + switch (m.data)[(m.p)] { + case 45: + goto st18 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st18 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st18 + } + default: + goto st18 + } + goto tr6 + st18: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof18 + } + st_case_18: + switch (m.data)[(m.p)] { + case 45: + goto st19 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st19 + } + case (m.data)[(m.p)] > 90: + if 97 <= 
(m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st19 + } + default: + goto st19 + } + goto tr6 + st19: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof19 + } + st_case_19: + switch (m.data)[(m.p)] { + case 45: + goto st20 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st20 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st20 + } + default: + goto st20 + } + goto tr6 + st20: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof20 + } + st_case_20: + switch (m.data)[(m.p)] { + case 45: + goto st21 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st21 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st21 + } + default: + goto st21 + } + goto tr6 + st21: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof21 + } + st_case_21: + switch (m.data)[(m.p)] { + case 45: + goto st22 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st22 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st22 + } + default: + goto st22 + } + goto tr6 + st22: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof22 + } + st_case_22: + switch (m.data)[(m.p)] { + case 45: + goto st23 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st23 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st23 + } + default: + goto st23 + } + goto tr6 + st23: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof23 + } + st_case_23: + switch (m.data)[(m.p)] { + case 45: + goto st24 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st24 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st24 + } + default: + goto st24 + } + goto tr6 + st24: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof24 + } + st_case_24: + switch (m.data)[(m.p)] { + case 45: + goto st25 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st25 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st25 + } + default: + goto st25 + } + goto tr6 + st25: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof25 + } + st_case_25: + switch (m.data)[(m.p)] { + case 45: + goto st26 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st26 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st26 + } + default: + goto st26 + } + goto tr6 + st26: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof26 + } + st_case_26: + switch (m.data)[(m.p)] { + case 45: + goto st27 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st27 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st27 + } + default: + goto st27 + } + goto tr6 + st27: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof27 + } + st_case_27: + switch (m.data)[(m.p)] { + case 45: + goto st28 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] 
<= 57 { + goto st28 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st28 + } + default: + goto st28 + } + goto tr6 + st28: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof28 + } + st_case_28: + switch (m.data)[(m.p)] { + case 45: + goto st29 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st29 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st29 + } + default: + goto st29 + } + goto tr6 + st29: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof29 + } + st_case_29: + switch (m.data)[(m.p)] { + case 45: + goto st30 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st30 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st30 + } + default: + goto st30 + } + goto tr6 + st30: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof30 + } + st_case_30: + switch (m.data)[(m.p)] { + case 45: + goto st31 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st31 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st31 + } + default: + goto st31 + } + goto tr6 + st31: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof31 + } + st_case_31: + switch (m.data)[(m.p)] { + case 45: + goto st32 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st32 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st32 + } + default: + goto st32 + } + goto tr6 + st32: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof32 + } + st_case_32: + switch (m.data)[(m.p)] { + case 45: + goto st33 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st33 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st33 + } + default: + goto st33 + } + goto tr6 + st33: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof33 + } + st_case_33: + switch (m.data)[(m.p)] { + case 45: + goto st34 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st34 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st34 + } + default: + goto st34 + } + goto tr6 + st34: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof34 + } + st_case_34: + switch (m.data)[(m.p)] { + case 45: + goto st35 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st35 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st35 + } + default: + goto st35 + } + goto tr6 + st35: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof35 + } + st_case_35: + switch (m.data)[(m.p)] { + case 45: + goto st36 + case 58: + goto tr10 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st36 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st36 + } + default: + goto st36 + } + goto tr6 + st36: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof36 + } + st_case_36: + switch (m.data)[(m.p)] { + case 45: + goto st37 + case 58: + goto tr10 + } + switch { + case 
(m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st37 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st37 + } + default: + goto st37 + } + goto tr6 + st37: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof37 + } + st_case_37: + if (m.data)[(m.p)] == 58 { + goto tr10 + } + goto tr6 + tr10: + output.ID = string(m.text()) + + goto st38 + st38: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof38 + } + st_case_38: + switch (m.data)[(m.p)] { + case 33: + goto tr42 + case 36: + goto tr42 + case 37: + goto tr43 + case 61: + goto tr42 + case 95: + goto tr42 + } + switch { + case (m.data)[(m.p)] < 48: + if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto tr42 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto tr42 + } + case (m.data)[(m.p)] >= 64: + goto tr42 + } + default: + goto tr42 + } + goto tr41 + tr42: + m.pb = m.p + + goto st44 + st44: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof44 + } + st_case_44: + switch (m.data)[(m.p)] { + case 33: + goto st44 + case 36: + goto st44 + case 37: + goto st39 + case 61: + goto st44 + case 95: + goto st44 + } + switch { + case (m.data)[(m.p)] < 48: + if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto st44 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st44 + } + case (m.data)[(m.p)] >= 64: + goto st44 + } + default: + goto st44 + } + goto tr41 + tr43: + m.pb = m.p + + goto st39 + st39: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof39 + } + st_case_39: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st40 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st40 + } + default: + goto tr46 + } + goto tr44 + tr46: + m.tolower = append(m.tolower, m.p-m.pb) + + goto st40 + st40: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof40 + } + st_case_40: + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st45 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st45 + } + default: + goto tr48 + } + goto tr44 + tr48: + m.tolower = append(m.tolower, m.p-m.pb) + + goto st45 + st45: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof45 + } + st_case_45: + switch (m.data)[(m.p)] { + case 33: + goto st44 + case 36: + goto st44 + case 37: + goto st39 + case 61: + goto st44 + case 95: + goto st44 + } + switch { + case (m.data)[(m.p)] < 48: + if 39 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 46 { + goto st44 + } + case (m.data)[(m.p)] > 59: + switch { + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st44 + } + case (m.data)[(m.p)] >= 64: + goto st44 + } + default: + goto st44 + } + goto tr44 + tr8: + m.pb = m.p + + goto st41 + st41: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof41 + } + st_case_41: + switch (m.data)[(m.p)] { + case 45: + goto st7 + case 58: + goto tr10 + case 82: + goto st42 + case 114: + goto st42 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st7 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st7 + } + default: + goto st7 + } + goto tr6 + st42: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof42 + } + st_case_42: + switch (m.data)[(m.p)] { + case 45: + goto st8 + case 
58: + goto tr10 + case 78: + goto st43 + case 110: + goto st43 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st8 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st8 + } + default: + goto st8 + } + goto tr50 + st43: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof43 + } + st_case_43: + if (m.data)[(m.p)] == 45 { + goto st9 + } + switch { + case (m.data)[(m.p)] < 65: + if 48 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 57 { + goto st9 + } + case (m.data)[(m.p)] > 90: + if 97 <= (m.data)[(m.p)] && (m.data)[(m.p)] <= 122 { + goto st9 + } + default: + goto st9 + } + goto tr52 + st46: + if (m.p)++; (m.p) == (m.pe) { + goto _test_eof46 + } + st_case_46: + switch (m.data)[(m.p)] { + case 10: + goto st0 + case 13: + goto st0 + } + goto st46 + st_out: + _test_eof2: + m.cs = 2 + goto _test_eof + _test_eof3: + m.cs = 3 + goto _test_eof + _test_eof4: + m.cs = 4 + goto _test_eof + _test_eof5: + m.cs = 5 + goto _test_eof + _test_eof6: + m.cs = 6 + goto _test_eof + _test_eof7: + m.cs = 7 + goto _test_eof + _test_eof8: + m.cs = 8 + goto _test_eof + _test_eof9: + m.cs = 9 + goto _test_eof + _test_eof10: + m.cs = 10 + goto _test_eof + _test_eof11: + m.cs = 11 + goto _test_eof + _test_eof12: + m.cs = 12 + goto _test_eof + _test_eof13: + m.cs = 13 + goto _test_eof + _test_eof14: + m.cs = 14 + goto _test_eof + _test_eof15: + m.cs = 15 + goto _test_eof + _test_eof16: + m.cs = 16 + goto _test_eof + _test_eof17: + m.cs = 17 + goto _test_eof + _test_eof18: + m.cs = 18 + goto _test_eof + _test_eof19: + m.cs = 19 + goto _test_eof + _test_eof20: + m.cs = 20 + goto _test_eof + _test_eof21: + m.cs = 21 + goto _test_eof + _test_eof22: + m.cs = 22 + goto _test_eof + _test_eof23: + m.cs = 23 + goto _test_eof + _test_eof24: + m.cs = 24 + goto _test_eof + _test_eof25: + m.cs = 25 + goto _test_eof + _test_eof26: + m.cs = 26 + goto _test_eof + _test_eof27: + m.cs = 27 + goto _test_eof + _test_eof28: + m.cs = 28 + goto _test_eof + _test_eof29: + m.cs = 29 + goto _test_eof + _test_eof30: + m.cs = 30 + goto _test_eof + _test_eof31: + m.cs = 31 + goto _test_eof + _test_eof32: + m.cs = 32 + goto _test_eof + _test_eof33: + m.cs = 33 + goto _test_eof + _test_eof34: + m.cs = 34 + goto _test_eof + _test_eof35: + m.cs = 35 + goto _test_eof + _test_eof36: + m.cs = 36 + goto _test_eof + _test_eof37: + m.cs = 37 + goto _test_eof + _test_eof38: + m.cs = 38 + goto _test_eof + _test_eof44: + m.cs = 44 + goto _test_eof + _test_eof39: + m.cs = 39 + goto _test_eof + _test_eof40: + m.cs = 40 + goto _test_eof + _test_eof45: + m.cs = 45 + goto _test_eof + _test_eof41: + m.cs = 41 + goto _test_eof + _test_eof42: + m.cs = 42 + goto _test_eof + _test_eof43: + m.cs = 43 + goto _test_eof + _test_eof46: + m.cs = 46 + goto _test_eof + + _test_eof: + { + } + if (m.p) == (m.eof) { + switch m.cs { + case 44, 45: + raw := m.text() + output.SS = string(raw) + // Iterate upper letters lowering them + for _, i := range m.tolower { + raw[i] = raw[i] + 32 + } + output.norm = string(raw) + + case 1, 2, 4: + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + case 3: + m.err = fmt.Errorf(errPrefix, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + case 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 41: + m.err = fmt.Errorf(errIdentifier, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, 
m.p) + (m.p)-- + + { + goto st46 + } + + case 38: + m.err = fmt.Errorf(errSpecificString, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + case 42: + m.err = fmt.Errorf(errPrefix, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errIdentifier, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + case 43: + m.err = fmt.Errorf(errNoUrnWithinID, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errIdentifier, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + case 39, 40: + m.err = fmt.Errorf(errHex, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errSpecificString, m.p) + (m.p)-- + + { + goto st46 + } + + m.err = fmt.Errorf(errParse, m.p) + (m.p)-- + + { + goto st46 + } + + } + } + + _out: + { + } + } + + if m.cs < first_final || m.cs == en_fail { + return nil, m.err + } + + return output, nil +} diff --git a/vendor/github.com/leodido/go-urn/machine.go.rl b/vendor/github.com/leodido/go-urn/machine.go.rl new file mode 100644 index 0000000..3bc05a6 --- /dev/null +++ b/vendor/github.com/leodido/go-urn/machine.go.rl @@ -0,0 +1,159 @@ +package urn + +import ( + "fmt" +) + +var ( + errPrefix = "expecting the prefix to be the \"urn\" string (whatever case) [col %d]" + errIdentifier = "expecting the identifier to be string (1..31 alnum chars, also containing dashes but not at its start) [col %d]" + errSpecificString = "expecting the specific string to be a string containing alnum, hex, or others ([()+,-.:=@;$_!*']) chars [col %d]" + errNoUrnWithinID = "expecting the identifier to not contain the \"urn\" reserved string [col %d]" + errHex = "expecting the specific string hex chars to be well-formed (%%alnum{2}) [col %d]" + errParse = "parsing error [col %d]" +) + +%%{ +machine urn; + +# unsigned alphabet +alphtype uint8; + +action mark { + m.pb = m.p +} + +action tolower { + m.tolower = append(m.tolower, m.p - m.pb) +} + +action set_pre { + output.prefix = string(m.text()) +} + +action set_nid { + output.ID = string(m.text()) +} + +action set_nss { + raw := m.text() + output.SS = string(raw) + // Iterate upper letters lowering them + for _, i := range m.tolower { + raw[i] = raw[i] + 32 + } + output.norm = string(raw) +} + +action err_pre { + m.err = fmt.Errorf(errPrefix, m.p) + fhold; + fgoto fail; +} + +action err_nid { + m.err = fmt.Errorf(errIdentifier, m.p) + fhold; + fgoto fail; +} + +action err_nss { + m.err = fmt.Errorf(errSpecificString, m.p) + fhold; + fgoto fail; +} + +action err_urn { + m.err = fmt.Errorf(errNoUrnWithinID, m.p) + fhold; + fgoto fail; +} + +action err_hex { + m.err = fmt.Errorf(errHex, m.p) + fhold; + fgoto fail; +} + +action err_parse { + m.err = fmt.Errorf(errParse, m.p) + fhold; + fgoto fail; +} + +pre = ([uU][rR][nN] @err(err_pre)) >mark %set_pre; + +nid = (alnum >mark (alnum | '-'){0,31}) %set_nid; + +hex = '%' (digit | lower | upper >tolower){2} $err(err_hex); + +sss = (alnum | [()+,\-.:=@;$_!*']); + +nss = (sss | hex)+ $err(err_nss); + +fail := (any - [\n\r])* @err{ fgoto main; }; + +main := (pre ':' (nid - pre %err(err_urn)) $err(err_nid) ':' nss >mark %set_nss) $err(err_parse); + +}%% + +%% write data noerror noprefix; + +// Machine is the interface representing the FSM +type Machine interface { + Error() error + Parse(input []byte) (*URN, error) +} + +type machine struct { + data []byte + cs int + p, pe, eof, pb int + err error + tolower []int +} + +// 
NewMachine creates a new FSM able to parse RFC 2141 strings.
+func NewMachine() Machine {
+	m := &machine{}
+
+	%% access m.;
+	%% variable p m.p;
+	%% variable pe m.pe;
+	%% variable eof m.eof;
+	%% variable data m.data;
+
+	return m
+}
+
+// Err returns the error that occurred on the last call to Parse.
+//
+// If the result is nil, then the line was parsed successfully.
+func (m *machine) Error() error {
+	return m.err
+}
+
+func (m *machine) text() []byte {
+	return m.data[m.pb:m.p]
+}
+
+// Parse parses the input byte array as a RFC 2141 string.
+func (m *machine) Parse(input []byte) (*URN, error) {
+	m.data = input
+	m.p = 0
+	m.pb = 0
+	m.pe = len(input)
+	m.eof = len(input)
+	m.err = nil
+	m.tolower = []int{}
+	output := &URN{}
+
+	%% write init;
+	%% write exec;
+
+	if m.cs < first_final || m.cs == en_fail {
+		return nil, m.err
+	}
+
+	return output, nil
+}
diff --git a/vendor/github.com/leodido/go-urn/makefile b/vendor/github.com/leodido/go-urn/makefile
new file mode 100644
index 0000000..362137a
--- /dev/null
+++ b/vendor/github.com/leodido/go-urn/makefile
@@ -0,0 +1,17 @@
+SHELL := /bin/bash
+
+machine.go: machine.go.rl
+	ragel -Z -G2 -e -o $@ $<
+	@gofmt -w -s $@
+	@sed -i '/^\/\/line/d' $@
+
+.PHONY: build
+build: machine.go
+
+.PHONY: bench
+bench: *_test.go machine.go
+	go test -bench=. -benchmem -benchtime=5s ./...
+
+.PHONY: tests
+tests: *_test.go machine.go
+	go test -race -timeout 10s -coverprofile=coverage.out -covermode=atomic -v ./...
\ No newline at end of file
diff --git a/vendor/github.com/leodido/go-urn/urn.go b/vendor/github.com/leodido/go-urn/urn.go
new file mode 100644
index 0000000..b903b7b
--- /dev/null
+++ b/vendor/github.com/leodido/go-urn/urn.go
@@ -0,0 +1,63 @@
+package urn
+
+import (
+	"strings"
+)
+
+// URN represents a Uniform Resource Name.
+//
+// The general form represented is:
+//
+//	urn:<identifier>:<specific string>
+//
+// Details at https://tools.ietf.org/html/rfc2141.
+type URN struct {
+	prefix string // Static prefix. Equal to "urn" when empty.
+	ID     string // Namespace identifier
+	SS     string // Namespace specific string
+	norm   string // Normalized namespace specific string
+}
+
+// Normalize turns the receiving URN into its norm version.
+//
+// Which means: lowercase prefix, lowercase namespace identifier, and the namespace specific string chars left unchanged (except tokens, which are lowercased).
+func (u *URN) Normalize() *URN {
+	return &URN{
+		prefix: "urn",
+		ID:     strings.ToLower(u.ID),
+		SS:     u.norm,
+	}
+}
+
+// Equal checks the lexical equivalence of the current URN with another one.
+func (u *URN) Equal(x *URN) bool {
+	return *u.Normalize() == *x.Normalize()
+}
+
+// String reassembles the URN into a valid URN string.
+//
+// This requires both ID and SS fields to be non-empty.
+// Otherwise it returns an empty string.
+//
+// Default URN prefix is "urn".
+func (u *URN) String() string {
+	var res string
+	if u.ID != "" && u.SS != "" {
+		if u.prefix == "" {
+			res += "urn"
+		}
+		res += u.prefix + ":" + u.ID + ":" + u.SS
+	}
+
+	return res
+}
+
+// Parse creates a URN instance from a byte array matching the correct URN syntax.
+func Parse(u []byte) (*URN, bool) {
+	urn, err := NewMachine().Parse(u)
+	if err != nil {
+		return nil, false
+	}
+
+	return urn, true
+}
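Because the FSM above is machine-generated and hard to review directly, a short usage sketch of the vendored package may help; the sample URN and the values noted in the comments are illustrative assumptions based on the doc comments in urn.go:

```go
package main

import (
	"fmt"

	urn "github.com/leodido/go-urn"
)

func main() {
	// Parse returns (nil, false) when the input is not a valid RFC 2141 URN
	u, ok := urn.Parse([]byte("URN:ISBN:0451450523"))
	if !ok {
		fmt.Println("invalid URN")
		return
	}

	fmt.Println(u.ID) // namespace identifier, here: ISBN
	fmt.Println(u.SS) // namespace specific string, here: 0451450523

	// Normalize lowercases the prefix and the namespace identifier
	fmt.Println(u.Normalize().String()) // expect roughly: urn:isbn:0451450523
}
```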
diff --git a/vendor/github.com/microcosm-cc/bluemonday/.coveralls.yml b/vendor/github.com/microcosm-cc/bluemonday/.coveralls.yml
new file mode 100644
index 0000000..e0c8760
--- /dev/null
+++ b/vendor/github.com/microcosm-cc/bluemonday/.coveralls.yml
@@ -0,0 +1 @@
+repo_token: x2wlA1x0X8CK45ybWpZRCVRB4g7vtkhaw
diff --git a/vendor/github.com/microcosm-cc/bluemonday/.travis.yml b/vendor/github.com/microcosm-cc/bluemonday/.travis.yml
new file mode 100644
index 0000000..4f66646
--- /dev/null
+++ b/vendor/github.com/microcosm-cc/bluemonday/.travis.yml
@@ -0,0 +1,22 @@
+language: go
+go:
+  - 1.1.x
+  - 1.2.x
+  - 1.3.x
+  - 1.4.x
+  - 1.5.x
+  - 1.6.x
+  - 1.7.x
+  - 1.8.x
+  - 1.9.x
+  - 1.10.x
+  - 1.11.x
+  - tip
+matrix:
+  allow_failures:
+    - go: tip
+  fast_finish: true
+install:
+  - go get .
+script:
+  - go test -v ./...
diff --git a/vendor/github.com/microcosm-cc/bluemonday/CONTRIBUTING.md b/vendor/github.com/microcosm-cc/bluemonday/CONTRIBUTING.md
new file mode 100644
index 0000000..d2b1230
--- /dev/null
+++ b/vendor/github.com/microcosm-cc/bluemonday/CONTRIBUTING.md
@@ -0,0 +1,51 @@
+# Contributing to bluemonday
+
+Third-party patches are essential for keeping bluemonday secure and offering the features developers want. However there are a few guidelines that we need contributors to follow so that we can maintain the quality of work that developers who use bluemonday expect.
+
+## Getting Started
+
+* Make sure you have a [Github account](https://github.com/signup/free)
+
+## Guidelines
+
+1. Do not vendor dependencies. As a security package, were we to vendor dependencies the projects that then vendor bluemonday may not receive the latest security updates to the dependencies. By not vendoring dependencies the project that implements bluemonday will vendor the latest version of any dependent packages. Vendoring is a project problem, not a package problem. bluemonday will be tested against the latest version of dependencies periodically and during any PR/merge.
+
+## Submitting an Issue
+
+* Submit a ticket for your issue, assuming one does not already exist
+* Clearly describe the issue including the steps to reproduce (with sample input and output) if it is a bug
+
+If you are reporting a security flaw, you may expect that we will provide the code to fix it for you. Otherwise you may want to submit a pull request to ensure the resolution is applied sooner rather than later:
+
+* Fork the repository on Github
+* Issue a pull request containing code to resolve the issue
+
+## Submitting a Pull Request
+
+* Submit a ticket for your issue, assuming one does not already exist
+* Describe the reason for the pull request and if applicable show some example inputs and outputs to demonstrate what the patch does
+* Fork the repository on Github
+* Before submitting the pull request you should
+  1. Include tests for your patch, 1 test should encapsulate the entire patch and should refer to the Github issue
+  1. If you have added new exposed/public functionality, you should ensure it is documented appropriately
+  1. If you have added new exposed/public functionality, you should consider demonstrating how to use it within one of the helpers or shipped policies if appropriate or within a test if modifying a helper or policy is not appropriate
+  1. Run all of the tests `go test -v ./...` or `make test` and ensure all tests pass
+  1. Run gofmt `gofmt -w ./$*` or `make fmt`
+  1. Run vet `go tool vet *.go` or `make vet` and resolve any issues
+  1. Install golint using `go get -u github.com/golang/lint/golint` and run lint `golint *.go` or `make lint` and resolve every warning
+* When submitting the pull request you should
+  1. Note the issue(s) it resolves, i.e. `Closes #6` in the pull request comment to close issue #6 when the pull request is accepted
+
+Once you have submitted a pull request, we *may* merge it without changes. If we have any comments or feedback, or need you to make changes to your pull request we will update the Github pull request or the associated issue. We expect responses from you within two weeks, and we may close the pull request if there is no activity.
+
+### Contributor Licence Agreement
+
+We haven't gone for the formal "Sign a Contributor Licence Agreement" thing that projects like [puppet](https://cla.puppetlabs.com/), [Mojito](https://developer.yahoo.com/cocktails/mojito/cla/) and companies like [Google](http://code.google.com/legal/individual-cla-v1.0.html) are using.
+
+But we do need to know that we can accept and merge your contributions, so for now the act of contributing a pull request should be considered equivalent to agreeing to a contributor licence agreement, specifically:
+
+You accept that the act of submitting code to the bluemonday project is to grant a copyright licence to the project that is perpetual, worldwide, non-exclusive, no-charge, royalty free and irrevocable.
+
+You accept that all who comply with the licence of the project (BSD 3-clause) are permitted to use your contributions to the project.
+
+You accept, and by submitting code do declare, that you have the legal right to grant such a licence to the project and that each of the contributions is your own original creation.
diff --git a/vendor/github.com/microcosm-cc/bluemonday/CREDITS.md b/vendor/github.com/microcosm-cc/bluemonday/CREDITS.md
new file mode 100644
index 0000000..b98873f
--- /dev/null
+++ b/vendor/github.com/microcosm-cc/bluemonday/CREDITS.md
@@ -0,0 +1,6 @@
+1. Andrew Krasichkov @buglloc https://github.com/buglloc
+1. John Graham-Cumming http://jgc.org/
+1. Mike Samuel mikesamuel@gmail.com
+1. Dmitri Shuralyov shurcooL@gmail.com
+1. https://github.com/opennota
+1. https://github.com/Gufran
\ No newline at end of file
diff --git a/vendor/github.com/microcosm-cc/bluemonday/LICENSE.md b/vendor/github.com/microcosm-cc/bluemonday/LICENSE.md
new file mode 100644
index 0000000..f822458
--- /dev/null
+++ b/vendor/github.com/microcosm-cc/bluemonday/LICENSE.md
@@ -0,0 +1,28 @@
+Copyright (c) 2014, David Kitchen <david@buro9.com>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+* Neither the name of the organisation (Microcosm) nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/microcosm-cc/bluemonday/Makefile b/vendor/github.com/microcosm-cc/bluemonday/Makefile
new file mode 100644
index 0000000..b15dc74
--- /dev/null
+++ b/vendor/github.com/microcosm-cc/bluemonday/Makefile
@@ -0,0 +1,42 @@
+# Targets:
+#
+#   all:      Builds the code locally after testing
+#
+#   fmt:      Formats the source files
+#   build:    Builds the code locally
+#   vet:      Vets the code
+#   lint:     Runs lint over the code (you do not need to fix everything)
+#   test:     Runs the tests
+#   cover:    Gives you the URL to a nice test coverage report
+#
+#   install:  Builds, tests and installs the code locally
+
+.PHONY: all fmt build vet lint test cover install
+
+# The first target is always the default action if `make` is called without
+# args we build and install into $GOPATH so that it can just be run
+
+all: fmt vet test install
+
+fmt:
+	@gofmt -s -w ./$*
+
+build:
+	@go build
+
+vet:
+	@go vet *.go
+
+lint:
+	@golint *.go
+
+test:
+	@go test -v ./...
+
+cover: COVERAGE_FILE := coverage.out
+cover:
+	@go test -coverprofile=$(COVERAGE_FILE) && \
+	cover -html=$(COVERAGE_FILE) && rm $(COVERAGE_FILE)
+
+install:
+	@go install ./...
diff --git a/vendor/github.com/microcosm-cc/bluemonday/README.md b/vendor/github.com/microcosm-cc/bluemonday/README.md
new file mode 100644
index 0000000..ce679c1
--- /dev/null
+++ b/vendor/github.com/microcosm-cc/bluemonday/README.md
@@ -0,0 +1,350 @@
+# bluemonday [![Build Status](https://travis-ci.org/microcosm-cc/bluemonday.svg?branch=master)](https://travis-ci.org/microcosm-cc/bluemonday) [![GoDoc](https://godoc.org/github.com/microcosm-cc/bluemonday?status.png)](https://godoc.org/github.com/microcosm-cc/bluemonday) [![Sourcegraph](https://sourcegraph.com/github.com/microcosm-cc/bluemonday/-/badge.svg)](https://sourcegraph.com/github.com/microcosm-cc/bluemonday?badge)
+
+bluemonday is an HTML sanitizer implemented in Go. It is fast and highly configurable.
+
+bluemonday takes untrusted user generated content as an input, and will return HTML that has been sanitised against a whitelist of approved HTML elements and attributes so that you can safely include the content in your web page.
+
+If you accept user generated content, and your server uses Go, you **need** bluemonday.
+
+The default policy for user generated content (`bluemonday.UGCPolicy().Sanitize()`) turns this:
+```html
+Hello <STYLE>.XSS{background-image:url("javascript:alert('XSS')");}</STYLE><A CLASS=XSS></A>World
+```
+
+Into a harmless:
+```html
+Hello World
+```
+
+And it turns this:
+```html
+<a href="javascript:alert('XSS1')" onmouseover="alert('XSS2')">XSS<a>
+```
+
+Into this:
+```html
+XSS<a>
+```
+
+Whilst still allowing this:
+```html
+<a href="http://www.google.com/">
+<img src="https://ssl.gstatic.com/accounts/ui/logo_2x.png"/>
+</a>
+```
+
+To pass through mostly unaltered (it gained a rel="nofollow" which is a good thing for user generated content):
+```html
+<a href="http://www.google.com/" rel="nofollow">
+<img src="https://ssl.gstatic.com/accounts/ui/logo_2x.png"/>
+</a>
+```
+
+It protects sites from [XSS](http://en.wikipedia.org/wiki/Cross-site_scripting) attacks. There are many [vectors for an XSS attack](https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet) and the best way to mitigate the risk is to sanitize user input against a known safe list of HTML elements and attributes.
There are many [vectors for an XSS attack](https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet) and the best way to mitigate the risk is to sanitize user input against a known safe list of HTML elements and attributes.
+
+You should **always** run bluemonday **after** any other processing.
+
+If you use [blackfriday](https://github.com/russross/blackfriday) or [Pandoc](http://johnmacfarlane.net/pandoc/) then bluemonday should be run after these steps. This ensures that no insecure HTML is introduced later in your process.
+
+bluemonday is heavily inspired by both the [OWASP Java HTML Sanitizer](https://code.google.com/p/owasp-java-html-sanitizer/) and the [HTML Purifier](http://htmlpurifier.org/).
+
+## Technical Summary
+
+Whitelist based, you need to either build a policy describing the HTML elements and attributes to permit (and the `regexp` patterns of attributes), or use one of the supplied policies representing good defaults.
+
+The policy containing the whitelist is applied using a fast non-validating, forward only, token-based parser implemented in the [Go net/html library](https://godoc.org/golang.org/x/net/html) by the core Go team.
+
+We expect to be supplied with well-formatted HTML (closing elements for every applicable open element, nested correctly) and so we do not focus on repairing badly nested or incomplete HTML. We focus on simply ensuring that whatever elements do exist are described in the policy whitelist and that attributes and links are safe for use on your web page. [GIGO](http://en.wikipedia.org/wiki/Garbage_in,_garbage_out) does apply and if you feed it bad HTML bluemonday is not tasked with figuring out how to make it good again.
+
+### Supported Go Versions
+
+bluemonday is tested against Go 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, and tip.
+
+We do not support Go 1.0 as we depend on `golang.org/x/net/html` which includes a reference to `io.ErrNoProgress` which did not exist in Go 1.0.
+
+## Is it production ready?
+
+*Yes*
+
+We are using bluemonday in production having migrated from the widely used and heavily field tested OWASP Java HTML Sanitizer.
+
+We are passing our extensive test suite (including AntiSamy tests as well as tests for any issues raised). Check for any [unresolved issues](https://github.com/microcosm-cc/bluemonday/issues?page=1&state=open) to see whether anything may be a blocker for you.
+
+We invite pull requests and issues to help us ensure we are offering comprehensive protection against various attacks via user generated content.
+
+## Usage
+
+Install in your `${GOPATH}` using `go get -u github.com/microcosm-cc/bluemonday`
+
+Then call it:
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/microcosm-cc/bluemonday"
+)
+
+func main() {
+	// Do this once for each unique policy, and use the policy for the life of the program
+	// Policy creation/editing is not safe to use in multiple goroutines
+	p := bluemonday.UGCPolicy()
+
+	// The policy can then be used to sanitize lots of input and it is safe to use the policy in multiple goroutines
+	html := p.Sanitize(
+		`<a onblur="alert(secret)" href="http://www.google.com">Google</a>`,
+	)
+
+	// Output:
+	// <a href="http://www.google.com" rel="nofollow">Google</a>
+	fmt.Println(html)
+}
+```
+
+We offer three ways to call Sanitize:
+```go
+p.Sanitize(string) string
+p.SanitizeBytes([]byte) []byte
+p.SanitizeReader(io.Reader) *bytes.Buffer
+```
+
+If you are obsessed about performance, `p.SanitizeReader(r).Bytes()` will return a `[]byte` without performing any unnecessary casting of the inputs or outputs. Though the difference is so negligible you should never need to care.
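As a minimal sketch of the three entry points side by side (our example, not part of the upstream README; the input strings are arbitrary):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/microcosm-cc/bluemonday"
)

func main() {
	p := bluemonday.UGCPolicy()

	// string in, string out; the onclick handler is stripped
	fmt.Println(p.Sanitize(`<b onclick="alert(1)">bold</b>`))

	// []byte in, []byte out
	fmt.Println(string(p.SanitizeBytes([]byte(`<i>italic</i>`))))

	// io.Reader in, *bytes.Buffer out; call .Bytes() to avoid re-casting
	buf := p.SanitizeReader(strings.NewReader(`<u>underline</u>`))
	fmt.Println(buf.String())
}
```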
+
+You can build your own policies:
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/microcosm-cc/bluemonday"
+)
+
+func main() {
+	p := bluemonday.NewPolicy()
+
+	// Require URLs to be parseable by net/url.Parse and either:
+	//   mailto: http:// or https://
+	p.AllowStandardURLs()
+
+	// We only allow <p> and <a href="">
+	p.AllowAttrs("href").OnElements("a")
+	p.AllowElements("p")
+
+	html := p.Sanitize(
+		`<p><a onblur="alert(secret)" href="http://www.google.com">Google</a></p>`,
+	)
+
+	// Output:
+	// <p><a href="http://www.google.com" rel="nofollow">Google</a></p>
+	fmt.Println(html)
+}
+```
+
+We ship two default policies:
+
+1. `bluemonday.StrictPolicy()` which can be thought of as equivalent to stripping all HTML elements and their attributes as it has nothing on its whitelist. An example usage scenario would be blog post titles where HTML tags are not expected at all and if they are then the elements *and* the content of the elements should be stripped. This is a *very* strict policy.
+2. `bluemonday.UGCPolicy()` which allows a broad selection of HTML elements and attributes that are safe for user generated content. Note that this policy does *not* whitelist iframes, object, embed, styles, script, etc. An example usage scenario would be blog post bodies where a variety of formatting is expected along with the potential for TABLEs and IMGs.
+
+## Policy Building
+
+The essence of building a policy is to determine which HTML elements and attributes are considered safe for your scenario. OWASP provide an [XSS prevention cheat sheet](https://www.owasp.org/index.php/XSS_(Cross_Site_Scripting)_Prevention_Cheat_Sheet) to help explain the risks, but essentially:
+
+1. Avoid anything other than the standard HTML elements
+1. Avoid `script`, `style`, `iframe`, `object`, `embed`, `base` elements that allow code to be executed by the client or third party content to be included that can execute code
+1. Avoid anything other than plain HTML attributes with values matched to a regexp
+
+Basically, you should be able to describe what HTML is fine for your scenario. If you do not have confidence that you can describe your policy please consider using one of the shipped policies such as `bluemonday.UGCPolicy()`.
+
+To create a new policy:
+```go
+p := bluemonday.NewPolicy()
+```
+
+To add elements to a policy either add just the elements:
+```go
+p.AllowElements("b", "strong")
+```
+
+Or add elements as a virtue of adding an attribute:
+```go
+// Not the recommended pattern, see the recommendation on using .Matching() below
+p.AllowAttrs("nowrap").OnElements("td", "th")
+```
+
+Attributes can either be added to all elements:
+```go
+p.AllowAttrs("dir").Matching(regexp.MustCompile("(?i)rtl|ltr")).Globally()
+```
+
+Or attributes can be added to specific elements:
+```go
+// Not the recommended pattern, see the recommendation on using .Matching() below
+p.AllowAttrs("value").OnElements("li")
+```
+
+It is **always** recommended that an attribute be made to match a pattern. XSS in HTML attributes is very easy otherwise:
+```go
+// \p{L} matches unicode letters, \p{N} matches unicode numbers
+p.AllowAttrs("title").Matching(regexp.MustCompile(`[\p{L}\p{N}\s\-_',:\[\]!\./\\\(\)&]*`)).Globally()
+```
+
+You can stop at any time and call .Sanitize():
+```go
+// string htmlIn passed in from a HTTP POST
+htmlOut := p.Sanitize(htmlIn)
+```
+
+And you can take any existing policy and extend it:
+```go
+p := bluemonday.UGCPolicy()
+p.AllowElements("fieldset", "select", "option")
+```
+
+### Links
+
+Links are difficult beasts to sanitise safely and also one of the biggest attack vectors for malicious content.
+
+It is possible to do this:
+```go
+p.AllowAttrs("href").Matching(regexp.MustCompile(`(?i)mailto|https?`)).OnElements("a")
+```
+
+But that will not protect you as the regular expression is insufficient in this case to have prevented a malformed value doing something unexpected.
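A quick illustration of why (ours, not from the upstream README): Go's regexp matching is unanchored, so the pattern above accepts any value that merely *contains* `mailto`, `http` or `https`, including a `javascript:` URL:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`(?i)mailto|https?`)

	// The malicious value passes the check because "https" appears as a substring.
	fmt.Println(re.MatchString(`javascript:alert('XSS')//https://example.com/`)) // true
}
```

Anchoring the pattern would help, but whitelisting schemes via the options below is the safer route.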
+ +We provide some additional global options for safely working with links. + +`RequireParseableURLs` will ensure that URLs are parseable by Go's `net/url` package: +```go +p.RequireParseableURLs(true) +``` + +If you have enabled parseable URLs then the following option will `AllowRelativeURLs`. By default this is disabled (bluemonday is a whitelist tool... you need to explicitly tell us to permit things) and when disabled it will prevent all local and scheme relative URLs (i.e. `href="localpage.html"`, `href="../home.html"` and even `href="//www.google.com"` are relative): +```go +p.AllowRelativeURLs(true) +``` + +If you have enabled parseable URLs then you can whitelist the schemes (commonly called protocol when thinking of `http` and `https`) that are permitted. Bear in mind that allowing relative URLs in the above option will allow for a blank scheme: +```go +p.AllowURLSchemes("mailto", "http", "https") +``` + +Regardless of whether you have enabled parseable URLs, you can force all URLs to have a rel="nofollow" attribute. This will be added if it does not exist, but only when the `href` is valid: +```go +// This applies to "a" "area" "link" elements that have a "href" attribute +p.RequireNoFollowOnLinks(true) +``` + +We provide a convenience method that applies all of the above, but you will still need to whitelist the linkable elements for the URL rules to be applied to: +```go +p.AllowStandardURLs() +p.AllowAttrs("cite").OnElements("blockquote", "q") +p.AllowAttrs("href").OnElements("a", "area") +p.AllowAttrs("src").OnElements("img") +``` + +An additional complexity regarding links is the data URI as defined in [RFC2397](http://tools.ietf.org/html/rfc2397). The data URI allows for images to be served inline using this format: + +```html + +``` + +We have provided a helper to verify the mimetype followed by base64 content of data URIs links: + +```go +p.AllowDataURIImages() +``` + +That helper will enable GIF, JPEG, PNG and WEBP images. + +It should be noted that there is a potential [security](http://palizine.plynt.com/issues/2010Oct/bypass-xss-filters/) [risk](https://capec.mitre.org/data/definitions/244.html) with the use of data URI links. You should only enable data URI links if you already trust the content. + +We also have some features to help deal with user generated content: +```go +p.AddTargetBlankToFullyQualifiedLinks(true) +``` + +This will ensure that anchor `` links that are fully qualified (the href destination includes a host name) will get `target="_blank"` added to them. + +Additionally any link that has `target="_blank"` after the policy has been applied will also have the `rel` attribute adjusted to add `noopener`. This means a link may start like `` and will end up as ``. It is important to note that the addition of `noopener` is a security feature and not an issue. There is an unfortunate feature to browsers that a browser window opened as a result of `target="_blank"` can still control the opener (your web page) and this protects against that. 
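A minimal sketch combining those two options (ours; the exact attribute ordering in the sanitized output may differ):

```go
package main

import (
	"fmt"

	"github.com/microcosm-cc/bluemonday"
)

func main() {
	p := bluemonday.UGCPolicy()
	p.AddTargetBlankToFullyQualifiedLinks(true)

	// The fully qualified link gains target="_blank"; because target="_blank"
	// is then present, rel is adjusted to include "noopener" (and UGCPolicy
	// already adds "nofollow").
	fmt.Println(p.Sanitize(`<a href="https://www.google.com/">external</a>`))
}
```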
The background to this can be found here: [https://dev.to/ben/the-targetblank-vulnerability-by-example](https://dev.to/ben/the-targetblank-vulnerability-by-example) + +### Policy Building Helpers + +We also bundle some helpers to simplify policy building: +```go + +// Permits the "dir", "id", "lang", "title" attributes globally +p.AllowStandardAttributes() + +// Permits the "img" element and its standard attributes +p.AllowImages() + +// Permits ordered and unordered lists, and also definition lists +p.AllowLists() + +// Permits HTML tables and all applicable elements and non-styling attributes +p.AllowTables() +``` + +### Invalid Instructions + +The following are invalid: +```go +// This does not say where the attributes are allowed, you need to add +// .Globally() or .OnElements(...) +// This will be ignored without error. +p.AllowAttrs("value") + +// This does not say where the attributes are allowed, you need to add +// .Globally() or .OnElements(...) +// This will be ignored without error. +p.AllowAttrs( + "type", +).Matching( + regexp.MustCompile("(?i)^(circle|disc|square|a|A|i|I|1)$"), +) +``` + +Both examples exhibit the same issue, they declare attributes but do not then specify whether they are whitelisted globally or only on specific elements (and which elements). Attributes belong to one or more elements, and the policy needs to declare this. + +## Limitations + +We are not yet including any tools to help whitelist and sanitize CSS. Which means that unless you wish to do the heavy lifting in a single regular expression (inadvisable), **you should not allow the "style" attribute anywhere**. + +It is not the job of bluemonday to fix your bad HTML, it is merely the job of bluemonday to prevent malicious HTML getting through. If you have mismatched HTML elements, or non-conforming nesting of elements, those will remain. But if you have well-structured HTML bluemonday will not break it. + +## TODO + +* Add support for CSS sanitisation to allow some CSS properties based on a whitelist, possibly using the [Gorilla CSS3 scanner](http://www.gorillatoolkit.org/pkg/css/scanner) - PRs welcome so long as testing covers XSS and demonstrates safety first +* Investigate whether devs want to blacklist elements and attributes. This would allow devs to take an existing policy (such as the `bluemonday.UGCPolicy()` ) that encapsulates 90% of what they're looking for but does more than they need, and to remove the extra things they do not want to make it 100% what they want +* Investigate whether devs want a validating HTML mode, in which the HTML elements are not just transformed into a balanced tree (every start tag has a closing tag at the correct depth) but also that elements and character data appear only in their allowed context (i.e. that a `table` element isn't a descendent of a `caption`, that `colgroup`, `thead`, `tbody`, `tfoot` and `tr` are permitted, and that character data is not permitted) + +## Development + +If you have cloned this repo you will probably need the dependency: + +`go get golang.org/x/net/html` + +Gophers can use their familiar tools: + +`go build` + +`go test` + +I personally use a Makefile as it spares typing the same args over and over whilst providing consistency for those of us who jump from language to language and enjoy just typing `make` in a project directory and watch magic happen. + +`make` will build, vet, test and install the library. 
+
+`make clean` will remove the library from a *single* `${GOPATH}/pkg` directory tree
+
+`make test` will run the tests
+
+`make cover` will run the tests and *open a browser window* with the coverage report
+
+`make lint` will run golint (install via `go get github.com/golang/lint/golint`)
+
+## Long term goals
+
+1. Open the code to adversarial peer review similar to the [Attack Review Ground Rules](https://code.google.com/p/owasp-java-html-sanitizer/wiki/AttackReviewGroundRules)
+1. Raise funds and pay for an external security review
diff --git a/vendor/github.com/microcosm-cc/bluemonday/doc.go b/vendor/github.com/microcosm-cc/bluemonday/doc.go
new file mode 100644
index 0000000..71dab60
--- /dev/null
+++ b/vendor/github.com/microcosm-cc/bluemonday/doc.go
@@ -0,0 +1,104 @@
+// Copyright (c) 2014, David Kitchen <david@buro9.com>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice, this
+//   list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright notice,
+//   this list of conditions and the following disclaimer in the documentation
+//   and/or other materials provided with the distribution.
+//
+// * Neither the name of the organisation (Microcosm) nor the names of its
+//   contributors may be used to endorse or promote products derived from
+//   this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package bluemonday provides a way of describing a whitelist of HTML elements
+and attributes as a policy, and for that policy to be applied to untrusted
+strings from users that may contain markup. All elements and attributes not on
+the whitelist will be stripped.
+
+The default bluemonday.UGCPolicy().Sanitize() turns this:
+
+    Hello <STYLE>.XSS{background-image:url("javascript:alert('XSS')");}</STYLE><A CLASS=XSS></A>World
+
+Into the more harmless:
+
+    Hello World
+
+And it turns this:
+
+    <a href="javascript:alert('XSS1')" onmouseover="alert('XSS2')">XSS</a>
+
+Into this:
+
+    XSS
+
+Whilst still allowing this:
+
+    <a href="http://www.google.com/">
+      <img src="https://ssl.gstatic.com/accounts/ui/logo_2x.png"/>
+    </a>
+
+To pass through mostly unaltered (it gained a rel="nofollow"):
+
+    <a href="http://www.google.com/" rel="nofollow">
+      <img src="https://ssl.gstatic.com/accounts/ui/logo_2x.png"/>
+    </a>
+
+The primary purpose of bluemonday is to take potentially unsafe user generated
+content (from things like Markdown, HTML WYSIWYG tools, etc) and make it safe
+for you to put on your website.
+
+It protects sites against XSS (http://en.wikipedia.org/wiki/Cross-site_scripting)
+and other malicious content that a user interface may deliver. There are many
+vectors for an XSS attack (https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet)
+and the safest thing to do is to sanitize user input against a known safe list
+of HTML elements and attributes.
+ +Note: You should always run bluemonday after any other processing. + +If you use blackfriday (https://github.com/russross/blackfriday) or +Pandoc (http://johnmacfarlane.net/pandoc/) then bluemonday should be run after +these steps. This ensures that no insecure HTML is introduced later in your +process. + +bluemonday is heavily inspired by both the OWASP Java HTML Sanitizer +(https://code.google.com/p/owasp-java-html-sanitizer/) and the HTML Purifier +(http://htmlpurifier.org/). + +We ship two default policies, one is bluemonday.StrictPolicy() and can be +thought of as equivalent to stripping all HTML elements and their attributes as +it has nothing on its whitelist. + +The other is bluemonday.UGCPolicy() and allows a broad selection of HTML +elements and attributes that are safe for user generated content. Note that +this policy does not whitelist iframes, object, embed, styles, script, etc. + +The essence of building a policy is to determine which HTML elements and +attributes are considered safe for your scenario. OWASP provide an XSS +prevention cheat sheet ( https://www.google.com/search?q=xss+prevention+cheat+sheet ) +to help explain the risks, but essentially: + + 1. Avoid whitelisting anything other than plain HTML elements + 2. Avoid whitelisting `script`, `style`, `iframe`, `object`, `embed`, `base` + elements + 3. Avoid whitelisting anything other than plain HTML elements with simple + values that you can match to a regexp +*/ +package bluemonday diff --git a/vendor/github.com/microcosm-cc/bluemonday/go.mod b/vendor/github.com/microcosm-cc/bluemonday/go.mod new file mode 100644 index 0000000..fa8453c --- /dev/null +++ b/vendor/github.com/microcosm-cc/bluemonday/go.mod @@ -0,0 +1,5 @@ +module github.com/microcosm-cc/bluemonday + +go 1.9 + +require golang.org/x/net v0.0.0-20181220203305-927f97764cc3 diff --git a/vendor/github.com/microcosm-cc/bluemonday/go.sum b/vendor/github.com/microcosm-cc/bluemonday/go.sum new file mode 100644 index 0000000..bee241d --- /dev/null +++ b/vendor/github.com/microcosm-cc/bluemonday/go.sum @@ -0,0 +1,2 @@ +golang.org/x/net v0.0.0-20181220203305-927f97764cc3 h1:eH6Eip3UpmR+yM/qI9Ijluzb1bNv/cAU/n+6l8tRSis= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= diff --git a/vendor/github.com/microcosm-cc/bluemonday/helpers.go b/vendor/github.com/microcosm-cc/bluemonday/helpers.go new file mode 100644 index 0000000..dfa5868 --- /dev/null +++ b/vendor/github.com/microcosm-cc/bluemonday/helpers.go @@ -0,0 +1,297 @@ +// Copyright (c) 2014, David Kitchen +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * Neither the name of the organisation (Microcosm) nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package bluemonday + +import ( + "encoding/base64" + "net/url" + "regexp" +) + +// A selection of regular expressions that can be used as .Matching() rules on +// HTML attributes. +var ( + // CellAlign handles the `align` attribute + // https://developer.mozilla.org/en-US/docs/Web/HTML/Element/td#attr-align + CellAlign = regexp.MustCompile(`(?i)^(center|justify|left|right|char)$`) + + // CellVerticalAlign handles the `valign` attribute + // https://developer.mozilla.org/en-US/docs/Web/HTML/Element/td#attr-valign + CellVerticalAlign = regexp.MustCompile(`(?i)^(baseline|bottom|middle|top)$`) + + // Direction handles the `dir` attribute + // https://developer.mozilla.org/en-US/docs/Web/HTML/Element/bdo#attr-dir + Direction = regexp.MustCompile(`(?i)^(rtl|ltr)$`) + + // ImageAlign handles the `align` attribute on the `image` tag + // http://www.w3.org/MarkUp/Test/Img/imgtest.html + ImageAlign = regexp.MustCompile( + `(?i)^(left|right|top|texttop|middle|absmiddle|baseline|bottom|absbottom)$`, + ) + + // Integer describes whole positive integers (including 0) used in places + // like td.colspan + // https://developer.mozilla.org/en-US/docs/Web/HTML/Element/td#attr-colspan + Integer = regexp.MustCompile(`^[0-9]+$`) + + // ISO8601 according to the W3 group is only a subset of the ISO8601 + // standard: http://www.w3.org/TR/NOTE-datetime + // + // Used in places like time.datetime + // https://developer.mozilla.org/en-US/docs/Web/HTML/Element/time#attr-datetime + // + // Matches patterns: + // Year: + // YYYY (eg 1997) + // Year and month: + // YYYY-MM (eg 1997-07) + // Complete date: + // YYYY-MM-DD (eg 1997-07-16) + // Complete date plus hours and minutes: + // YYYY-MM-DDThh:mmTZD (eg 1997-07-16T19:20+01:00) + // Complete date plus hours, minutes and seconds: + // YYYY-MM-DDThh:mm:ssTZD (eg 1997-07-16T19:20:30+01:00) + // Complete date plus hours, minutes, seconds and a decimal fraction of a + // second + // YYYY-MM-DDThh:mm:ss.sTZD (eg 1997-07-16T19:20:30.45+01:00) + ISO8601 = regexp.MustCompile( + `^[0-9]{4}(-[0-9]{2}(-[0-9]{2}([ T][0-9]{2}(:[0-9]{2}){1,2}(.[0-9]{1,6})` + + `?Z?([\+-][0-9]{2}:[0-9]{2})?)?)?)?$`, + ) + + // ListType encapsulates the common value as well as the latest spec + // values for lists + // https://developer.mozilla.org/en-US/docs/Web/HTML/Element/ol#attr-type + ListType = regexp.MustCompile(`(?i)^(circle|disc|square|a|A|i|I|1)$`) + + // SpaceSeparatedTokens is used in places like `a.rel` and the common attribute + // `class` which both contain space delimited lists of data tokens + // http://www.w3.org/TR/html-markup/datatypes.html#common.data.tokens-def + // Regexp: \p{L} matches unicode letters, \p{N} matches unicode numbers + SpaceSeparatedTokens = 
regexp.MustCompile(`^([\s\p{L}\p{N}_-]+)$`) + + // Number is a double value used on HTML5 meter and progress elements + // http://www.whatwg.org/specs/web-apps/current-work/multipage/the-button-element.html#the-meter-element + Number = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?$`) + + // NumberOrPercent is used predominantly as units of measurement in width + // and height attributes + // https://developer.mozilla.org/en-US/docs/Web/HTML/Element/img#attr-height + NumberOrPercent = regexp.MustCompile(`^[0-9]+[%]?$`) + + // Paragraph of text in an attribute such as *.'title', img.alt, etc + // https://developer.mozilla.org/en-US/docs/Web/HTML/Global_attributes#attr-title + // Note that we are not allowing chars that could close tags like '>' + Paragraph = regexp.MustCompile(`^[\p{L}\p{N}\s\-_',\[\]!\./\\\(\)]*$`) + + // dataURIImagePrefix is used by AllowDataURIImages to define the acceptable + // prefix of data URIs that contain common web image formats. + // + // This is not exported as it's not useful by itself, and only has value + // within the AllowDataURIImages func + dataURIImagePrefix = regexp.MustCompile( + `^image/(gif|jpeg|png|webp);base64,`, + ) +) + +// AllowStandardURLs is a convenience function that will enable rel="nofollow" +// on "a", "area" and "link" (if you have allowed those elements) and will +// ensure that the URL values are parseable and either relative or belong to the +// "mailto", "http", or "https" schemes +func (p *Policy) AllowStandardURLs() { + // URLs must be parseable by net/url.Parse() + p.RequireParseableURLs(true) + + // !url.IsAbs() is permitted + p.AllowRelativeURLs(true) + + // Most common URL schemes only + p.AllowURLSchemes("mailto", "http", "https") + + // For all anchors we will add rel="nofollow" if it does not already exist + // This applies to "a" "area" "link" + p.RequireNoFollowOnLinks(true) +} + +// AllowStandardAttributes will enable "id", "title" and the language specific +// attributes "dir" and "lang" on all elements that are whitelisted +func (p *Policy) AllowStandardAttributes() { + // "dir" "lang" are permitted as both language attributes affect charsets + // and direction of text. + p.AllowAttrs("dir").Matching(Direction).Globally() + p.AllowAttrs( + "lang", + ).Matching(regexp.MustCompile(`[a-zA-Z]{2,20}`)).Globally() + + // "id" is permitted. This is pretty much as some HTML elements require this + // to work well ("dfn" is an example of a "id" being value) + // This does create a risk that JavaScript and CSS within your web page + // might identify the wrong elements. Ensure that you select things + // accurately + p.AllowAttrs("id").Matching( + regexp.MustCompile(`[a-zA-Z0-9\:\-_\.]+`), + ).Globally() + + // "title" is permitted as it improves accessibility. + p.AllowAttrs("title").Matching(Paragraph).Globally() +} + +// AllowStyling presently enables the class attribute globally. +// +// Note: When bluemonday ships a CSS parser and we can safely sanitise that, +// this will also allow sanitized styling of elements via the style attribute. +func (p *Policy) AllowStyling() { + + // "class" is permitted globally + p.AllowAttrs("class").Matching(SpaceSeparatedTokens).Globally() +} + +// AllowImages enables the img element and some popular attributes. It will also +// ensure that URL values are parseable. This helper does not enable data URI +// images, for that you should also use the AllowDataURIImages() helper. 
+func (p *Policy) AllowImages() { + + // "img" is permitted + p.AllowAttrs("align").Matching(ImageAlign).OnElements("img") + p.AllowAttrs("alt").Matching(Paragraph).OnElements("img") + p.AllowAttrs("height", "width").Matching(NumberOrPercent).OnElements("img") + + // Standard URLs enabled + p.AllowStandardURLs() + p.AllowAttrs("src").OnElements("img") +} + +// AllowDataURIImages permits the use of inline images defined in RFC2397 +// http://tools.ietf.org/html/rfc2397 +// http://en.wikipedia.org/wiki/Data_URI_scheme +// +// Images must have a mimetype matching: +// image/gif +// image/jpeg +// image/png +// image/webp +// +// NOTE: There is a potential security risk to allowing data URIs and you should +// only permit them on content you already trust. +// http://palizine.plynt.com/issues/2010Oct/bypass-xss-filters/ +// https://capec.mitre.org/data/definitions/244.html +func (p *Policy) AllowDataURIImages() { + + // URLs must be parseable by net/url.Parse() + p.RequireParseableURLs(true) + + // Supply a function to validate images contained within data URI + p.AllowURLSchemeWithCustomPolicy( + "data", + func(url *url.URL) (allowUrl bool) { + if url.RawQuery != "" || url.Fragment != "" { + return false + } + + matched := dataURIImagePrefix.FindString(url.Opaque) + if matched == "" { + return false + } + + _, err := base64.StdEncoding.DecodeString(url.Opaque[len(matched):]) + if err != nil { + return false + } + + return true + }, + ) +} + +// AllowLists will enabled ordered and unordered lists, as well as definition +// lists +func (p *Policy) AllowLists() { + // "ol" "ul" are permitted + p.AllowAttrs("type").Matching(ListType).OnElements("ol", "ul") + + // "li" is permitted + p.AllowAttrs("type").Matching(ListType).OnElements("li") + p.AllowAttrs("value").Matching(Integer).OnElements("li") + + // "dl" "dt" "dd" are permitted + p.AllowElements("dl", "dt", "dd") +} + +// AllowTables will enable a rich set of elements and attributes to describe +// HTML tables +func (p *Policy) AllowTables() { + + // "table" is permitted + p.AllowAttrs("height", "width").Matching(NumberOrPercent).OnElements("table") + p.AllowAttrs("summary").Matching(Paragraph).OnElements("table") + + // "caption" is permitted + p.AllowElements("caption") + + // "col" "colgroup" are permitted + p.AllowAttrs("align").Matching(CellAlign).OnElements("col", "colgroup") + p.AllowAttrs("height", "width").Matching( + NumberOrPercent, + ).OnElements("col", "colgroup") + p.AllowAttrs("span").Matching(Integer).OnElements("colgroup", "col") + p.AllowAttrs("valign").Matching( + CellVerticalAlign, + ).OnElements("col", "colgroup") + + // "thead" "tr" are permitted + p.AllowAttrs("align").Matching(CellAlign).OnElements("thead", "tr") + p.AllowAttrs("valign").Matching(CellVerticalAlign).OnElements("thead", "tr") + + // "td" "th" are permitted + p.AllowAttrs("abbr").Matching(Paragraph).OnElements("td", "th") + p.AllowAttrs("align").Matching(CellAlign).OnElements("td", "th") + p.AllowAttrs("colspan", "rowspan").Matching(Integer).OnElements("td", "th") + p.AllowAttrs("headers").Matching( + SpaceSeparatedTokens, + ).OnElements("td", "th") + p.AllowAttrs("height", "width").Matching( + NumberOrPercent, + ).OnElements("td", "th") + p.AllowAttrs( + "scope", + ).Matching( + regexp.MustCompile(`(?i)(?:row|col)(?:group)?`), + ).OnElements("td", "th") + p.AllowAttrs("valign").Matching(CellVerticalAlign).OnElements("td", "th") + p.AllowAttrs("nowrap").Matching( + regexp.MustCompile(`(?i)|nowrap`), + ).OnElements("td", "th") + + // "tbody" "tfoot" + 
p.AllowAttrs("align").Matching(CellAlign).OnElements("tbody", "tfoot") + p.AllowAttrs("valign").Matching( + CellVerticalAlign, + ).OnElements("tbody", "tfoot") +} diff --git a/vendor/github.com/microcosm-cc/bluemonday/policies.go b/vendor/github.com/microcosm-cc/bluemonday/policies.go new file mode 100644 index 0000000..570bba8 --- /dev/null +++ b/vendor/github.com/microcosm-cc/bluemonday/policies.go @@ -0,0 +1,253 @@ +// Copyright (c) 2014, David Kitchen +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * Neither the name of the organisation (Microcosm) nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package bluemonday + +import ( + "regexp" +) + +// StrictPolicy returns an empty policy, which will effectively strip all HTML +// elements and their attributes from a document. +func StrictPolicy() *Policy { + return NewPolicy() +} + +// StripTagsPolicy is DEPRECATED. Use StrictPolicy instead. +func StripTagsPolicy() *Policy { + return StrictPolicy() +} + +// UGCPolicy returns a policy aimed at user generated content that is a result +// of HTML WYSIWYG tools and Markdown conversions. +// +// This is expected to be a fairly rich document where as much markup as +// possible should be retained. Markdown permits raw HTML so we are basically +// providing a policy to sanitise HTML5 documents safely but with the +// least intrusion on the formatting expectations of the user. +func UGCPolicy() *Policy { + + p := NewPolicy() + + /////////////////////// + // Global attributes // + /////////////////////// + + // "class" is not permitted as we are not allowing users to style their own + // content + + p.AllowStandardAttributes() + + ////////////////////////////// + // Global URL format policy // + ////////////////////////////// + + p.AllowStandardURLs() + + //////////////////////////////// + // Declarations and structure // + //////////////////////////////// + + // "xml" "xslt" "DOCTYPE" "html" "head" are not permitted as we are + // expecting user generated content to be a fragment of HTML and not a full + // document. 
+ + ////////////////////////// + // Sectioning root tags // + ////////////////////////// + + // "article" and "aside" are permitted and takes no attributes + p.AllowElements("article", "aside") + + // "body" is not permitted as we are expecting user generated content to be a fragment + // of HTML and not a full document. + + // "details" is permitted, including the "open" attribute which can either + // be blank or the value "open". + p.AllowAttrs( + "open", + ).Matching(regexp.MustCompile(`(?i)^(|open)$`)).OnElements("details") + + // "fieldset" is not permitted as we are not allowing forms to be created. + + // "figure" is permitted and takes no attributes + p.AllowElements("figure") + + // "nav" is not permitted as it is assumed that the site (and not the user) + // has defined navigation elements + + // "section" is permitted and takes no attributes + p.AllowElements("section") + + // "summary" is permitted and takes no attributes + p.AllowElements("summary") + + ////////////////////////// + // Headings and footers // + ////////////////////////// + + // "footer" is not permitted as we expect user content to be a fragment and + // not structural to this extent + + // "h1" through "h6" are permitted and take no attributes + p.AllowElements("h1", "h2", "h3", "h4", "h5", "h6") + + // "header" is not permitted as we expect user content to be a fragment and + // not structural to this extent + + // "hgroup" is permitted and takes no attributes + p.AllowElements("hgroup") + + ///////////////////////////////////// + // Content grouping and separating // + ///////////////////////////////////// + + // "blockquote" is permitted, including the "cite" attribute which must be + // a standard URL. + p.AllowAttrs("cite").OnElements("blockquote") + + // "br" "div" "hr" "p" "span" "wbr" are permitted and take no attributes + p.AllowElements("br", "div", "hr", "p", "span", "wbr") + + /////////// + // Links // + /////////// + + // "a" is permitted + p.AllowAttrs("href").OnElements("a") + + // "area" is permitted along with the attributes that map image maps work + p.AllowAttrs("name").Matching( + regexp.MustCompile(`^([\p{L}\p{N}_-]+)$`), + ).OnElements("map") + p.AllowAttrs("alt").Matching(Paragraph).OnElements("area") + p.AllowAttrs("coords").Matching( + regexp.MustCompile(`^([0-9]+,)+[0-9]+$`), + ).OnElements("area") + p.AllowAttrs("href").OnElements("area") + p.AllowAttrs("rel").Matching(SpaceSeparatedTokens).OnElements("area") + p.AllowAttrs("shape").Matching( + regexp.MustCompile(`(?i)^(default|circle|rect|poly)$`), + ).OnElements("area") + p.AllowAttrs("usemap").Matching( + regexp.MustCompile(`(?i)^#[\p{L}\p{N}_-]+$`), + ).OnElements("img") + + // "link" is not permitted + + ///////////////////// + // Phrase elements // + ///////////////////// + + // The following are all inline phrasing elements + p.AllowElements("abbr", "acronym", "cite", "code", "dfn", "em", + "figcaption", "mark", "s", "samp", "strong", "sub", "sup", "var") + + // "q" is permitted and "cite" is a URL and handled by URL policies + p.AllowAttrs("cite").OnElements("q") + + // "time" is permitted + p.AllowAttrs("datetime").Matching(ISO8601).OnElements("time") + + //////////////////// + // Style elements // + //////////////////// + + // block and inline elements that impart no semantic meaning but style the + // document + p.AllowElements("b", "i", "pre", "small", "strike", "tt", "u") + + // "style" is not permitted as we are not yet sanitising CSS and it is an + // XSS attack vector + + ////////////////////// + // HTML5 Formatting 
// + ////////////////////// + + // "bdi" "bdo" are permitted + p.AllowAttrs("dir").Matching(Direction).OnElements("bdi", "bdo") + + // "rp" "rt" "ruby" are permitted + p.AllowElements("rp", "rt", "ruby") + + /////////////////////////// + // HTML5 Change tracking // + /////////////////////////// + + // "del" "ins" are permitted + p.AllowAttrs("cite").Matching(Paragraph).OnElements("del", "ins") + p.AllowAttrs("datetime").Matching(ISO8601).OnElements("del", "ins") + + /////////// + // Lists // + /////////// + + p.AllowLists() + + //////////// + // Tables // + //////////// + + p.AllowTables() + + /////////// + // Forms // + /////////// + + // By and large, forms are not permitted. However there are some form + // elements that can be used to present data, and we do permit those + // + // "button" "fieldset" "input" "keygen" "label" "output" "select" "datalist" + // "textarea" "optgroup" "option" are all not permitted + + // "meter" is permitted + p.AllowAttrs( + "value", + "min", + "max", + "low", + "high", + "optimum", + ).Matching(Number).OnElements("meter") + + // "progress" is permitted + p.AllowAttrs("value", "max").Matching(Number).OnElements("progress") + + ////////////////////// + // Embedded content // + ////////////////////// + + // Vast majority not permitted + // "audio" "canvas" "embed" "iframe" "object" "param" "source" "svg" "track" + // "video" are all not permitted + + p.AllowImages() + + return p +} diff --git a/vendor/github.com/microcosm-cc/bluemonday/policy.go b/vendor/github.com/microcosm-cc/bluemonday/policy.go new file mode 100644 index 0000000..f61d98f --- /dev/null +++ b/vendor/github.com/microcosm-cc/bluemonday/policy.go @@ -0,0 +1,552 @@ +// Copyright (c) 2014, David Kitchen +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * Neither the name of the organisation (Microcosm) nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package bluemonday + +import ( + "net/url" + "regexp" + "strings" +) + +// Policy encapsulates the whitelist of HTML elements and attributes that will +// be applied to the sanitised HTML. 
+// +// You should use bluemonday.NewPolicy() to create a blank policy as the +// unexported fields contain maps that need to be initialized. +type Policy struct { + + // Declares whether the maps have been initialized, used as a cheap check to + // ensure that those using Policy{} directly won't cause nil pointer + // exceptions + initialized bool + + // If true then we add spaces when stripping tags, specifically the closing + // tag is replaced by a space character. + addSpaces bool + + // When true, add rel="nofollow" to HTML anchors + requireNoFollow bool + + // When true, add rel="nofollow" to HTML anchors + // Will add for href="http://foo" + // Will skip for href="/foo" or href="foo" + requireNoFollowFullyQualifiedLinks bool + + // When true add target="_blank" to fully qualified links + // Will add for href="http://foo" + // Will skip for href="/foo" or href="foo" + addTargetBlankToFullyQualifiedLinks bool + + // When true, URLs must be parseable by "net/url" url.Parse() + requireParseableURLs bool + + // When true, u, _ := url.Parse("url"); !u.IsAbs() is permitted + allowRelativeURLs bool + + // When true, allow data attributes. + allowDataAttributes bool + + // map[htmlElementName]map[htmlAttributeName]attrPolicy + elsAndAttrs map[string]map[string]attrPolicy + + // map[htmlAttributeName]attrPolicy + globalAttrs map[string]attrPolicy + + // If urlPolicy is nil, all URLs with matching schema are allowed. + // Otherwise, only the URLs with matching schema and urlPolicy(url) + // returning true are allowed. + allowURLSchemes map[string]urlPolicy + + // If an element has had all attributes removed as a result of a policy + // being applied, then the element would be removed from the output. + // + // However some elements are valid and have strong layout meaning without + // any attributes, i.e. . To prevent those being removed we maintain + // a list of elements that are allowed to have no attributes and that will + // be maintained in the output HTML. + setOfElementsAllowedWithoutAttrs map[string]struct{} + + setOfElementsToSkipContent map[string]struct{} +} + +type attrPolicy struct { + + // optional pattern to match, when not nil the regexp needs to match + // otherwise the attribute is removed + regexp *regexp.Regexp +} + +type attrPolicyBuilder struct { + p *Policy + + attrNames []string + regexp *regexp.Regexp + allowEmpty bool +} + +type urlPolicy func(url *url.URL) (allowUrl bool) + +// init initializes the maps if this has not been done already +func (p *Policy) init() { + if !p.initialized { + p.elsAndAttrs = make(map[string]map[string]attrPolicy) + p.globalAttrs = make(map[string]attrPolicy) + p.allowURLSchemes = make(map[string]urlPolicy) + p.setOfElementsAllowedWithoutAttrs = make(map[string]struct{}) + p.setOfElementsToSkipContent = make(map[string]struct{}) + p.initialized = true + } +} + +// NewPolicy returns a blank policy with nothing whitelisted or permitted. This +// is the recommended way to start building a policy and you should now use +// AllowAttrs() and/or AllowElements() to construct the whitelist of HTML +// elements and attributes. +func NewPolicy() *Policy { + + p := Policy{} + + p.addDefaultElementsWithoutAttrs() + p.addDefaultSkipElementContent() + + return &p +} + +// AllowAttrs takes a range of HTML attribute names and returns an +// attribute policy builder that allows you to specify the pattern and scope of +// the whitelisted attribute. +// +// The attribute policy is only added to the core policy when either Globally() +// or OnElements(...) 
are called. +func (p *Policy) AllowAttrs(attrNames ...string) *attrPolicyBuilder { + + p.init() + + abp := attrPolicyBuilder{ + p: p, + allowEmpty: false, + } + + for _, attrName := range attrNames { + abp.attrNames = append(abp.attrNames, strings.ToLower(attrName)) + } + + return &abp +} + +// AllowDataAttributes whitelists all data attributes. We can't specify the name +// of each attribute exactly as they are customized. +// +// NOTE: These values are not sanitized and applications that evaluate or process +// them without checking and verification of the input may be at risk if this option +// is enabled. This is a 'caveat emptor' option and the person enabling this option +// needs to fully understand the potential impact with regards to whatever application +// will be consuming the sanitized HTML afterwards, i.e. if you know you put a link in a +// data attribute and use that to automatically load some new window then you're giving +// the author of a HTML fragment the means to open a malicious destination automatically. +// Use with care! +func (p *Policy) AllowDataAttributes() { + p.allowDataAttributes = true +} + +// AllowNoAttrs says that attributes on element are optional. +// +// The attribute policy is only added to the core policy when OnElements(...) +// are called. +func (p *Policy) AllowNoAttrs() *attrPolicyBuilder { + + p.init() + + abp := attrPolicyBuilder{ + p: p, + allowEmpty: true, + } + return &abp +} + +// AllowNoAttrs says that attributes on element are optional. +// +// The attribute policy is only added to the core policy when OnElements(...) +// are called. +func (abp *attrPolicyBuilder) AllowNoAttrs() *attrPolicyBuilder { + + abp.allowEmpty = true + + return abp +} + +// Matching allows a regular expression to be applied to a nascent attribute +// policy, and returns the attribute policy. Calling this more than once will +// replace the existing regexp. 
+func (abp *attrPolicyBuilder) Matching(regex *regexp.Regexp) *attrPolicyBuilder { + + abp.regexp = regex + + return abp +} + +// OnElements will bind an attribute policy to a given range of HTML elements +// and return the updated policy +func (abp *attrPolicyBuilder) OnElements(elements ...string) *Policy { + + for _, element := range elements { + element = strings.ToLower(element) + + for _, attr := range abp.attrNames { + + if _, ok := abp.p.elsAndAttrs[element]; !ok { + abp.p.elsAndAttrs[element] = make(map[string]attrPolicy) + } + + ap := attrPolicy{} + if abp.regexp != nil { + ap.regexp = abp.regexp + } + + abp.p.elsAndAttrs[element][attr] = ap + } + + if abp.allowEmpty { + abp.p.setOfElementsAllowedWithoutAttrs[element] = struct{}{} + + if _, ok := abp.p.elsAndAttrs[element]; !ok { + abp.p.elsAndAttrs[element] = make(map[string]attrPolicy) + } + } + } + + return abp.p +} + +// Globally will bind an attribute policy to all HTML elements and return the +// updated policy +func (abp *attrPolicyBuilder) Globally() *Policy { + + for _, attr := range abp.attrNames { + if _, ok := abp.p.globalAttrs[attr]; !ok { + abp.p.globalAttrs[attr] = attrPolicy{} + } + + ap := attrPolicy{} + if abp.regexp != nil { + ap.regexp = abp.regexp + } + + abp.p.globalAttrs[attr] = ap + } + + return abp.p +} + +// AllowElements will append HTML elements to the whitelist without applying an +// attribute policy to those elements (the elements are permitted +// sans-attributes) +func (p *Policy) AllowElements(names ...string) *Policy { + p.init() + + for _, element := range names { + element = strings.ToLower(element) + + if _, ok := p.elsAndAttrs[element]; !ok { + p.elsAndAttrs[element] = make(map[string]attrPolicy) + } + } + + return p +} + +// RequireNoFollowOnLinks will result in all tags having a rel="nofollow" +// added to them if one does not already exist +// +// Note: This requires p.RequireParseableURLs(true) and will enable it. +func (p *Policy) RequireNoFollowOnLinks(require bool) *Policy { + + p.requireNoFollow = require + p.requireParseableURLs = true + + return p +} + +// RequireNoFollowOnFullyQualifiedLinks will result in all tags that point +// to a non-local destination (i.e. starts with a protocol and has a host) +// having a rel="nofollow" added to them if one does not already exist +// +// Note: This requires p.RequireParseableURLs(true) and will enable it. +func (p *Policy) RequireNoFollowOnFullyQualifiedLinks(require bool) *Policy { + + p.requireNoFollowFullyQualifiedLinks = require + p.requireParseableURLs = true + + return p +} + +// AddTargetBlankToFullyQualifiedLinks will result in all tags that point +// to a non-local destination (i.e. starts with a protocol and has a host) +// having a target="_blank" added to them if one does not already exist +// +// Note: This requires p.RequireParseableURLs(true) and will enable it. 
+func (p *Policy) AddTargetBlankToFullyQualifiedLinks(require bool) *Policy { + + p.addTargetBlankToFullyQualifiedLinks = require + p.requireParseableURLs = true + + return p +} + +// RequireParseableURLs will result in all URLs requiring that they be parseable +// by "net/url" url.Parse() +// This applies to: +// - a.href +// - area.href +// - blockquote.cite +// - img.src +// - link.href +// - script.src +func (p *Policy) RequireParseableURLs(require bool) *Policy { + + p.requireParseableURLs = require + + return p +} + +// AllowRelativeURLs enables RequireParseableURLs and then permits URLs that +// are parseable, have no schema information and url.IsAbs() returns false +// This permits local URLs +func (p *Policy) AllowRelativeURLs(require bool) *Policy { + + p.RequireParseableURLs(true) + p.allowRelativeURLs = require + + return p +} + +// AllowURLSchemes will append URL schemes to the whitelist +// Example: p.AllowURLSchemes("mailto", "http", "https") +func (p *Policy) AllowURLSchemes(schemes ...string) *Policy { + p.init() + + p.RequireParseableURLs(true) + + for _, scheme := range schemes { + scheme = strings.ToLower(scheme) + + // Allow all URLs with matching scheme. + p.allowURLSchemes[scheme] = nil + } + + return p +} + +// AllowURLSchemeWithCustomPolicy will append URL schemes with +// a custom URL policy to the whitelist. +// Only the URLs with matching schema and urlPolicy(url) +// returning true will be allowed. +func (p *Policy) AllowURLSchemeWithCustomPolicy( + scheme string, + urlPolicy func(url *url.URL) (allowUrl bool), +) *Policy { + + p.init() + + p.RequireParseableURLs(true) + + scheme = strings.ToLower(scheme) + + p.allowURLSchemes[scheme] = urlPolicy + + return p +} + +// AddSpaceWhenStrippingTag states whether to add a single space " " when +// removing tags that are not whitelisted by the policy. +// +// This is useful if you expect to strip tags in dense markup and may lose the +// value of whitespace. +// +// For example: "
<p>Hello</p><p>World</p>
"" would be sanitized to "HelloWorld" +// with the default value of false, but you may wish to sanitize this to +// " Hello World " by setting AddSpaceWhenStrippingTag to true as this would +// retain the intent of the text. +func (p *Policy) AddSpaceWhenStrippingTag(allow bool) *Policy { + + p.addSpaces = allow + + return p +} + +// SkipElementsContent adds the HTML elements whose tags is needed to be removed +// with its content. +func (p *Policy) SkipElementsContent(names ...string) *Policy { + + p.init() + + for _, element := range names { + element = strings.ToLower(element) + + if _, ok := p.setOfElementsToSkipContent[element]; !ok { + p.setOfElementsToSkipContent[element] = struct{}{} + } + } + + return p +} + +// AllowElementsContent marks the HTML elements whose content should be +// retained after removing the tag. +func (p *Policy) AllowElementsContent(names ...string) *Policy { + + p.init() + + for _, element := range names { + delete(p.setOfElementsToSkipContent, strings.ToLower(element)) + } + + return p +} + +// addDefaultElementsWithoutAttrs adds the HTML elements that we know are valid +// without any attributes to an internal map. +// i.e. we know that
<br> is valid, but <bdo> isn't valid as the "dir" attr
+// is mandatory
+func (p *Policy) addDefaultElementsWithoutAttrs() {
+	p.init()
+
+	p.setOfElementsAllowedWithoutAttrs["abbr"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["acronym"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["address"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["article"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["aside"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["audio"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["b"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["bdi"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["blockquote"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["body"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["br"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["button"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["canvas"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["caption"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["center"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["cite"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["code"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["col"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["colgroup"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["datalist"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["dd"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["del"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["details"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["dfn"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["div"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["dl"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["dt"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["em"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["fieldset"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["figcaption"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["figure"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["footer"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["h1"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["h2"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["h3"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["h4"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["h5"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["h6"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["head"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["header"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["hgroup"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["hr"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["html"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["i"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["ins"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["kbd"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["li"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["mark"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["marquee"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["nav"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["ol"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["optgroup"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["option"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["p"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["pre"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["q"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["rp"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["rt"] = struct{}{}
+	p.setOfElementsAllowedWithoutAttrs["ruby"] =
struct{}{} + p.setOfElementsAllowedWithoutAttrs["s"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["samp"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["script"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["section"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["select"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["small"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["span"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["strike"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["strong"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["style"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["sub"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["summary"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["sup"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["svg"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["table"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["tbody"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["td"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["textarea"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["tfoot"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["th"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["thead"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["title"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["time"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["tr"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["tt"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["u"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["ul"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["var"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["video"] = struct{}{} + p.setOfElementsAllowedWithoutAttrs["wbr"] = struct{}{} + +} + +// addDefaultSkipElementContent adds the HTML elements that we should skip +// rendering the character content of, if the element itself is not allowed. +// This is all character data that the end user would not normally see. +// i.e. if we exclude a tag. +func (p *Policy) addDefaultSkipElementContent() { + p.init() + + p.setOfElementsToSkipContent["frame"] = struct{}{} + p.setOfElementsToSkipContent["frameset"] = struct{}{} + p.setOfElementsToSkipContent["iframe"] = struct{}{} + p.setOfElementsToSkipContent["noembed"] = struct{}{} + p.setOfElementsToSkipContent["noframes"] = struct{}{} + p.setOfElementsToSkipContent["noscript"] = struct{}{} + p.setOfElementsToSkipContent["nostyle"] = struct{}{} + p.setOfElementsToSkipContent["object"] = struct{}{} + p.setOfElementsToSkipContent["script"] = struct{}{} + p.setOfElementsToSkipContent["style"] = struct{}{} + p.setOfElementsToSkipContent["title"] = struct{}{} +} diff --git a/vendor/github.com/microcosm-cc/bluemonday/sanitize.go b/vendor/github.com/microcosm-cc/bluemonday/sanitize.go new file mode 100644 index 0000000..65ed89b --- /dev/null +++ b/vendor/github.com/microcosm-cc/bluemonday/sanitize.go @@ -0,0 +1,581 @@ +// Copyright (c) 2014, David Kitchen +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+//
+// * Neither the name of the organisation (Microcosm) nor the names of its
+//   contributors may be used to endorse or promote products derived from
+//   this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package bluemonday
+
+import (
+	"bytes"
+	"io"
+	"net/url"
+	"regexp"
+	"strings"
+
+	"golang.org/x/net/html"
+)
+
+var (
+	dataAttribute             = regexp.MustCompile("^data-.+")
+	dataAttributeXMLPrefix    = regexp.MustCompile("^xml.+")
+	dataAttributeInvalidChars = regexp.MustCompile("[A-Z;]+")
+)
+
+// Sanitize takes a string that contains an HTML fragment or document and applies
+// the given policy whitelist.
+//
+// It returns an HTML string that has been sanitized by the policy or an empty
+// string if an error has occurred (most likely as a consequence of extremely
+// malformed input)
+func (p *Policy) Sanitize(s string) string {
+	if strings.TrimSpace(s) == "" {
+		return s
+	}
+
+	return p.sanitize(strings.NewReader(s)).String()
+}
+
+// SanitizeBytes takes a []byte that contains an HTML fragment or document and applies
+// the given policy whitelist.
+//
+// It returns a []byte containing the HTML that has been sanitized by the policy
+// or an empty []byte if an error has occurred (most likely as a consequence of
+// extremely malformed input)
+func (p *Policy) SanitizeBytes(b []byte) []byte {
+	if len(bytes.TrimSpace(b)) == 0 {
+		return b
+	}
+
+	return p.sanitize(bytes.NewReader(b)).Bytes()
+}
+
+// SanitizeReader takes an io.Reader that contains an HTML fragment or document
+// and applies the given policy whitelist.
+//
+// It returns a bytes.Buffer containing the HTML that has been sanitized by the
+// policy. Errors during sanitization will merely return an empty result.
+func (p *Policy) SanitizeReader(r io.Reader) *bytes.Buffer {
+	return p.sanitize(r)
+}
+
+// Performs the actual sanitization process.
+func (p *Policy) sanitize(r io.Reader) *bytes.Buffer {
+
+	// It is possible that the developer has created the policy via:
+	//   p := bluemonday.Policy{}
+	// rather than:
+	//   p := bluemonday.NewPolicy()
+	// If this is the case, and if they haven't yet triggered an action that
+	// would initialize the maps, then we need to do that.
+ p.init() + + var ( + buff bytes.Buffer + skipElementContent bool + skippingElementsCount int64 + skipClosingTag bool + closingTagToSkipStack []string + mostRecentlyStartedToken string + ) + + tokenizer := html.NewTokenizer(r) + for { + if tokenizer.Next() == html.ErrorToken { + err := tokenizer.Err() + if err == io.EOF { + // End of input means end of processing + return &buff + } + + // Raw tokenizer error + return &bytes.Buffer{} + } + + token := tokenizer.Token() + switch token.Type { + case html.DoctypeToken: + + // DocType is not handled as there is no safe parsing mechanism + // provided by golang.org/x/net/html for the content, and this can + // be misused to insert HTML tags that are not then sanitized + // + // One might wish to recursively sanitize here using the same policy + // but I will need to do some further testing before considering + // this. + + case html.CommentToken: + + // Comments are ignored by default + + case html.StartTagToken: + + mostRecentlyStartedToken = token.Data + + aps, ok := p.elsAndAttrs[token.Data] + if !ok { + if _, ok := p.setOfElementsToSkipContent[token.Data]; ok { + skipElementContent = true + skippingElementsCount++ + } + if p.addSpaces { + buff.WriteString(" ") + } + break + } + + if len(token.Attr) != 0 { + token.Attr = p.sanitizeAttrs(token.Data, token.Attr, aps) + } + + if len(token.Attr) == 0 { + if !p.allowNoAttrs(token.Data) { + skipClosingTag = true + closingTagToSkipStack = append(closingTagToSkipStack, token.Data) + if p.addSpaces { + buff.WriteString(" ") + } + break + } + } + + if !skipElementContent { + buff.WriteString(token.String()) + } + + case html.EndTagToken: + + if mostRecentlyStartedToken == token.Data { + mostRecentlyStartedToken = "" + } + + if skipClosingTag && closingTagToSkipStack[len(closingTagToSkipStack)-1] == token.Data { + closingTagToSkipStack = closingTagToSkipStack[:len(closingTagToSkipStack)-1] + if len(closingTagToSkipStack) == 0 { + skipClosingTag = false + } + if p.addSpaces { + buff.WriteString(" ") + } + break + } + + if _, ok := p.elsAndAttrs[token.Data]; !ok { + if _, ok := p.setOfElementsToSkipContent[token.Data]; ok { + skippingElementsCount-- + if skippingElementsCount == 0 { + skipElementContent = false + } + } + if p.addSpaces { + buff.WriteString(" ") + } + break + } + + if !skipElementContent { + buff.WriteString(token.String()) + } + + case html.SelfClosingTagToken: + + aps, ok := p.elsAndAttrs[token.Data] + if !ok { + if p.addSpaces { + buff.WriteString(" ") + } + break + } + + if len(token.Attr) != 0 { + token.Attr = p.sanitizeAttrs(token.Data, token.Attr, aps) + } + + if len(token.Attr) == 0 && !p.allowNoAttrs(token.Data) { + if p.addSpaces { + buff.WriteString(" ") + } + break + } + + if !skipElementContent { + buff.WriteString(token.String()) + } + + case html.TextToken: + + if !skipElementContent { + switch mostRecentlyStartedToken { + case "script": + // not encouraged, but if a policy allows JavaScript we + // should not HTML escape it as that would break the output + buff.WriteString(token.Data) + case "style": + // not encouraged, but if a policy allows CSS styles we + // should not HTML escape it as that would break the output + buff.WriteString(token.Data) + default: + // HTML escape the text + buff.WriteString(token.String()) + } + } + default: + // A token that didn't exist in the html package when we wrote this + return &bytes.Buffer{} + } + } +} + +// sanitizeAttrs takes a set of element attribute policies and the global +// attribute policies and applies them to the 
[]html.Attribute returning a set
+// of html.Attributes that match the policies
+func (p *Policy) sanitizeAttrs(
+	elementName string,
+	attrs []html.Attribute,
+	aps map[string]attrPolicy,
+) []html.Attribute {
+
+	if len(attrs) == 0 {
+		return attrs
+	}
+
+	// Builds a new attribute slice based on whether the attribute has been
+	// whitelisted explicitly or globally.
+	cleanAttrs := []html.Attribute{}
+	for _, htmlAttr := range attrs {
+		if p.allowDataAttributes {
+			// If we see a data attribute, let it through.
+			if isDataAttribute(htmlAttr.Key) {
+				cleanAttrs = append(cleanAttrs, htmlAttr)
+				continue
+			}
+		}
+		// Is there an element specific attribute policy that applies?
+		if ap, ok := aps[htmlAttr.Key]; ok {
+			if ap.regexp != nil {
+				if ap.regexp.MatchString(htmlAttr.Val) {
+					cleanAttrs = append(cleanAttrs, htmlAttr)
+					continue
+				}
+			} else {
+				cleanAttrs = append(cleanAttrs, htmlAttr)
+				continue
+			}
+		}
+
+		// Is there a global attribute policy that applies?
+		if ap, ok := p.globalAttrs[htmlAttr.Key]; ok {
+
+			if ap.regexp != nil {
+				if ap.regexp.MatchString(htmlAttr.Val) {
+					cleanAttrs = append(cleanAttrs, htmlAttr)
+				}
+			} else {
+				cleanAttrs = append(cleanAttrs, htmlAttr)
+			}
+		}
+	}
+
+	if len(cleanAttrs) == 0 {
+		// If nothing was allowed, let's get out of here
+		return cleanAttrs
+	}
+	// cleanAttrs now contains the attributes that are permitted
+
+	if linkable(elementName) {
+		if p.requireParseableURLs {
+			// Ensure URLs are parseable:
+			// - a.href
+			// - area.href
+			// - link.href
+			// - blockquote.cite
+			// - q.cite
+			// - img.src
+			// - script.src
+			tmpAttrs := []html.Attribute{}
+			for _, htmlAttr := range cleanAttrs {
+				switch elementName {
+				case "a", "area", "link":
+					if htmlAttr.Key == "href" {
+						if u, ok := p.validURL(htmlAttr.Val); ok {
+							htmlAttr.Val = u
+							tmpAttrs = append(tmpAttrs, htmlAttr)
+						}
+						break
+					}
+					tmpAttrs = append(tmpAttrs, htmlAttr)
+				case "blockquote", "q":
+					if htmlAttr.Key == "cite" {
+						if u, ok := p.validURL(htmlAttr.Val); ok {
+							htmlAttr.Val = u
+							tmpAttrs = append(tmpAttrs, htmlAttr)
+						}
+						break
+					}
+					tmpAttrs = append(tmpAttrs, htmlAttr)
+				case "img", "script":
+					if htmlAttr.Key == "src" {
+						if u, ok := p.validURL(htmlAttr.Val); ok {
+							htmlAttr.Val = u
+							tmpAttrs = append(tmpAttrs, htmlAttr)
+						}
+						break
+					}
+					tmpAttrs = append(tmpAttrs, htmlAttr)
+				default:
+					tmpAttrs = append(tmpAttrs, htmlAttr)
+				}
+			}
+			cleanAttrs = tmpAttrs
+		}
+
+		if (p.requireNoFollow ||
+			p.requireNoFollowFullyQualifiedLinks ||
+			p.addTargetBlankToFullyQualifiedLinks) &&
+			len(cleanAttrs) > 0 {
+
+			// Add rel="nofollow" if a "href" exists
+			switch elementName {
+			case "a", "area", "link":
+				var hrefFound bool
+				var externalLink bool
+				for _, htmlAttr := range cleanAttrs {
+					if htmlAttr.Key == "href" {
+						hrefFound = true
+
+						u, err := url.Parse(htmlAttr.Val)
+						if err != nil {
+							continue
+						}
+						if u.Host != "" {
+							externalLink = true
+						}
+
+						continue
+					}
+				}
+
+				if hrefFound {
+					var (
+						noFollowFound    bool
+						targetBlankFound bool
+					)
+
+					addNoFollow := (p.requireNoFollow ||
+						externalLink && p.requireNoFollowFullyQualifiedLinks)
+
+					addTargetBlank := (externalLink &&
+						p.addTargetBlankToFullyQualifiedLinks)
+
+					tmpAttrs := []html.Attribute{}
+					for _, htmlAttr := range cleanAttrs {
+
+						var appended bool
+						if htmlAttr.Key == "rel" && addNoFollow {
+
+							if strings.Contains(htmlAttr.Val, "nofollow") {
+								noFollowFound = true
+								tmpAttrs = append(tmpAttrs, htmlAttr)
+								appended = true
+							} else {
+								htmlAttr.Val += " nofollow"
+								noFollowFound = true
+								tmpAttrs = append(tmpAttrs,
htmlAttr) + appended = true + } + } + + if elementName == "a" && htmlAttr.Key == "target" { + if htmlAttr.Val == "_blank" { + targetBlankFound = true + } + if addTargetBlank && !targetBlankFound { + htmlAttr.Val = "_blank" + targetBlankFound = true + tmpAttrs = append(tmpAttrs, htmlAttr) + appended = true + } + } + + if !appended { + tmpAttrs = append(tmpAttrs, htmlAttr) + } + } + if noFollowFound || targetBlankFound { + cleanAttrs = tmpAttrs + } + + if addNoFollow && !noFollowFound { + rel := html.Attribute{} + rel.Key = "rel" + rel.Val = "nofollow" + cleanAttrs = append(cleanAttrs, rel) + } + + if elementName == "a" && addTargetBlank && !targetBlankFound { + rel := html.Attribute{} + rel.Key = "target" + rel.Val = "_blank" + targetBlankFound = true + cleanAttrs = append(cleanAttrs, rel) + } + + if targetBlankFound { + // target="_blank" has a security risk that allows the + // opened window/tab to issue JavaScript calls against + // window.opener, which in effect allow the destination + // of the link to control the source: + // https://dev.to/ben/the-targetblank-vulnerability-by-example + // + // To mitigate this risk, we need to add a specific rel + // attribute if it is not already present. + // rel="noopener" + // + // Unfortunately this is processing the rel twice (we + // already looked at it earlier ^^) as we cannot be sure + // of the ordering of the href and rel, and whether we + // have fully satisfied that we need to do this. This + // double processing only happens *if* target="_blank" + // is true. + var noOpenerAdded bool + tmpAttrs := []html.Attribute{} + for _, htmlAttr := range cleanAttrs { + var appended bool + if htmlAttr.Key == "rel" { + if strings.Contains(htmlAttr.Val, "noopener") { + noOpenerAdded = true + tmpAttrs = append(tmpAttrs, htmlAttr) + } else { + htmlAttr.Val += " noopener" + noOpenerAdded = true + tmpAttrs = append(tmpAttrs, htmlAttr) + } + + appended = true + } + if !appended { + tmpAttrs = append(tmpAttrs, htmlAttr) + } + } + if noOpenerAdded { + cleanAttrs = tmpAttrs + } else { + // rel attr was not found, or else noopener would + // have been added already + rel := html.Attribute{} + rel.Key = "rel" + rel.Val = "noopener" + cleanAttrs = append(cleanAttrs, rel) + } + + } + } + default: + } + } + } + + return cleanAttrs +} + +func (p *Policy) allowNoAttrs(elementName string) bool { + _, ok := p.setOfElementsAllowedWithoutAttrs[elementName] + return ok +} + +func (p *Policy) validURL(rawurl string) (string, bool) { + if p.requireParseableURLs { + // URLs are valid if when space is trimmed the URL is valid + rawurl = strings.TrimSpace(rawurl) + + // URLs cannot contain whitespace, unless it is a data-uri + if (strings.Contains(rawurl, " ") || + strings.Contains(rawurl, "\t") || + strings.Contains(rawurl, "\n")) && + !strings.HasPrefix(rawurl, `data:`) { + return "", false + } + + // URLs are valid if they parse + u, err := url.Parse(rawurl) + if err != nil { + return "", false + } + + if u.Scheme != "" { + + urlPolicy, ok := p.allowURLSchemes[u.Scheme] + if !ok { + return "", false + + } + + if urlPolicy == nil || urlPolicy(u) == true { + return u.String(), true + } + + return "", false + } + + if p.allowRelativeURLs { + if u.String() != "" { + return u.String(), true + } + } + + return "", false + } + + return rawurl, true +} + +func linkable(elementName string) bool { + switch elementName { + case "a", "area", "blockquote", "img", "link", "script": + return true + default: + return false + } +} + +func isDataAttribute(val string) bool { + if 
!dataAttribute.MatchString(val) {
+		return false
+	}
+	rest := strings.Split(val, "data-")
+	if len(rest) == 1 {
+		return false
+	}
+	// data-xml* is invalid.
+	if dataAttributeXMLPrefix.MatchString(rest[1]) {
+		return false
+	}
+	// no uppercase or semi-colons allowed.
+	if dataAttributeInvalidChars.MatchString(rest[1]) {
+		return false
+	}
+	return true
+}
diff --git a/vendor/github.com/minio/minio-go/.gitignore b/vendor/github.com/minio/minio-go/.gitignore
new file mode 100644
index 0000000..fa967ab
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/.gitignore
@@ -0,0 +1,3 @@
+*~
+*.test
+validator
diff --git a/vendor/github.com/minio/minio-go/.travis.yml b/vendor/github.com/minio/minio-go/.travis.yml
new file mode 100644
index 0000000..7ed7df1
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/.travis.yml
@@ -0,0 +1,28 @@
+sudo: false
+language: go
+
+os:
+- linux
+
+env:
+- ARCH=x86_64
+- ARCH=i686
+
+go:
+- 1.11.x
+- tip
+
+matrix:
+  fast_finish: true
+  allow_failures:
+  - go: tip
+
+addons:
+  apt:
+    packages:
+    - devscripts
+
+script:
+- diff -au <(gofmt -d .) <(printf "")
+- diff -au <(licensecheck --check '.go$' --recursive --lines 0 * | grep -v -w 'Apache (v2.0)') <(printf "")
+- make
diff --git a/vendor/github.com/minio/minio-go/CONTRIBUTING.md b/vendor/github.com/minio/minio-go/CONTRIBUTING.md
new file mode 100644
index 0000000..8b1ee86
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/CONTRIBUTING.md
@@ -0,0 +1,23 @@
+
+### Developer Guidelines
+
+``minio-go`` welcomes your contribution. To make the process as seamless as possible, we ask for the following:
+
+* Go ahead and fork the project and make your changes. We encourage pull requests to discuss code changes.
+    - Fork it
+    - Create your feature branch (git checkout -b my-new-feature)
+    - Commit your changes (git commit -am 'Add some feature')
+    - Push to the branch (git push origin my-new-feature)
+    - Create new Pull Request
+
+* When you're ready to create a pull request, be sure to:
+    - Have test cases for the new code. If you have questions about how to do it, please ask in your pull request.
+    - Run `go fmt`
+    - Squash your commits into a single commit. `git rebase -i`. It's okay to force update your pull request.
+    - Make sure `go test -race ./...` and `go build` completes.
+      NOTE: go test runs functional tests and requires you to have an AWS S3 account. Set them as environment variables
+      ``ACCESS_KEY`` and ``SECRET_KEY``. To run a shorter version of the tests please use ``go test -short -race ./...``
+
+* Read the [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from the Golang project
+    - `minio-go` project is strictly conformant with Golang style
+    - if you happen to observe offending code, please feel free to send a pull request
diff --git a/vendor/github.com/minio/minio-go/LICENSE b/vendor/github.com/minio/minio-go/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
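Stepping back to the bluemonday `sanitize.go` vendored above: the tokenizer loop is driven entirely by the `Policy` configuration (`elsAndAttrs`, `allowDataAttributes`, `requireNoFollow`, and the related fields). As a rough sketch of how a consumer wires those fields up through bluemonday's public builder API — this code is not part of the diff, and the input/output shown is illustrative only:

```go
package main

import (
	"fmt"
	"regexp"

	"github.com/microcosm-cc/bluemonday"
)

func main() {
	// UGCPolicy is bluemonday's built-in policy for user generated content;
	// it populates the same internal maps that sanitize.go consults.
	p := bluemonday.UGCPolicy()

	// Allow a constrained class attribute on code elements (feeds elsAndAttrs),
	// permit data-* attributes (allowDataAttributes), and force rel="nofollow"
	// on links (requireNoFollow).
	p.AllowAttrs("class").Matching(regexp.MustCompile(`^[a-zA-Z0-9 \-]+$`)).OnElements("code")
	p.AllowDataAttributes()
	p.RequireNoFollowOnLinks(true)

	// The onclick handler is stripped; the parseable href survives.
	fmt.Println(p.Sanitize(`<a href="https://dbhub.io" onclick="evil()">DBHub.io</a>`))
	// Expected output (roughly): <a href="https://dbhub.io" rel="nofollow">DBHub.io</a>
}
```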
diff --git a/vendor/github.com/minio/minio-go/MAINTAINERS.md b/vendor/github.com/minio/minio-go/MAINTAINERS.md
new file mode 100644
index 0000000..1797307
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/MAINTAINERS.md
@@ -0,0 +1,35 @@
+# For maintainers only
+
+## Responsibilities
+
+Please go through this link [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522)
+
+### Making new releases
+Tag and sign your release commit. Additionally, this step requires you to have access to Minio's trusted private key.
+```sh
+$ export GNUPGHOME=/media/${USER}/minio/trusted
+$ git tag -s 4.0.0
+$ git push
+$ git push --tags
+```
+
+### Update version
+Once a release has been made, update the `libraryVersion` constant in `api.go` to the next release version.
+
+```sh
+$ grep libraryVersion api.go
+      libraryVersion = "4.0.1"
+```
+
+Commit your changes
+```
+$ git commit -a -m "Update version for next release" --author "Minio Trusted <trusted@minio.io>"
+```
+
+### Announce
+Announce the new release by adding release notes at https://github.com/minio/minio-go/releases from the `trusted@minio.io` account. Release notes require two sections, `highlights` and `changelog`. Highlights is a bulleted list of salient features in this release, and Changelog contains the list of all commits since the last release.
+
+To generate `changelog`
+```sh
+$ git log --no-color --pretty=format:'-%d %s (%cr) <%an>' ..
+```
diff --git a/vendor/github.com/minio/minio-go/Makefile b/vendor/github.com/minio/minio-go/Makefile
new file mode 100644
index 0000000..bad81ff
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/Makefile
@@ -0,0 +1,15 @@
+all: checks
+
+checks:
+	@go get -t ./...
+	@go vet ./...
+	@SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./...
+	@go get github.com/dustin/go-humanize/...
+	@go get github.com/sirupsen/logrus/...
+	@SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 MINT_MODE=full go run functional_tests.go
+	@mkdir -p /tmp/examples && for i in $(echo examples/s3/*); do go build -o /tmp/examples/$(basename ${i:0:-3}) ${i}; done
+	@go get -u github.com/a8m/mark/...
+	@go get -u github.com/minio/cli/...
+	@go get -u golang.org/x/tools/cmd/goimports
+	@go get -u github.com/gernest/wow/...
+	@go build docs/validator.go && ./validator -m docs/API.md -t docs/checker.go.tpl
diff --git a/vendor/github.com/minio/minio-go/NOTICE b/vendor/github.com/minio/minio-go/NOTICE
new file mode 100644
index 0000000..c521791
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/NOTICE
@@ -0,0 +1,2 @@
+minio-go
+Copyright 2015-2017 Minio, Inc.
\ No newline at end of file
diff --git a/vendor/github.com/minio/minio-go/README.md b/vendor/github.com/minio/minio-go/README.md
new file mode 100644
index 0000000..ad9d5e6
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/README.md
@@ -0,0 +1,239 @@
+# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE)
+
+The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage.
+ +This quickstart guide will show you how to install the Minio client SDK, connect to Minio, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.minio.io/docs/golang-client-api-reference). + +This document assumes that you have a working [Go development environment](https://docs.minio.io/docs/how-to-install-golang). + +## Download from Github +```sh +go get -u github.com/minio/minio-go +``` + +## Initialize Minio Client +Minio client requires the following four parameters specified to connect to an Amazon S3 compatible object storage. + +| Parameter | Description| +| :--- | :--- | +| endpoint | URL to object storage service. | +| accessKeyID | Access key is the user ID that uniquely identifies your account. | +| secretAccessKey | Secret key is the password to your account. | +| secure | Set this value to 'true' to enable secure (HTTPS) access. | + + +```go +package main + +import ( + "github.com/minio/minio-go" + "log" +) + +func main() { + endpoint := "play.minio.io:9000" + accessKeyID := "Q3AM3UQ867SPQQA43P2F" + secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" + useSSL := true + + // Initialize minio client object. + minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL) + if err != nil { + log.Fatalln(err) + } + + log.Printf("%#v\n", minioClient) // minioClient is now setup +} +``` + +## Quick Start Example - File Uploader +This example program connects to an object storage server, creates a bucket and uploads a file to the bucket. + +We will use the Minio server running at [https://play.minio.io:9000](https://play.minio.io:9000) in this example. Feel free to use this service for testing and development. Access credentials shown in this example are open to the public. + +### FileUploader.go +```go +package main + +import ( + "github.com/minio/minio-go" + "log" +) + +func main() { + endpoint := "play.minio.io:9000" + accessKeyID := "Q3AM3UQ867SPQQA43P2F" + secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" + useSSL := true + + // Initialize minio client object. + minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL) + if err != nil { + log.Fatalln(err) + } + + // Make a new bucket called mymusic. + bucketName := "mymusic" + location := "us-east-1" + + err = minioClient.MakeBucket(bucketName, location) + if err != nil { + // Check to see if we already own this bucket (which happens if you run this twice) + exists, err := minioClient.BucketExists(bucketName) + if err == nil && exists { + log.Printf("We already own %s\n", bucketName) + } else { + log.Fatalln(err) + } + } else { + log.Printf("Successfully created %s\n", bucketName) + } + + // Upload the zip file + objectName := "golden-oldies.zip" + filePath := "/tmp/golden-oldies.zip" + contentType := "application/zip" + + // Upload the zip file with FPutObject + n, err := minioClient.FPutObject(bucketName, objectName, filePath, minio.PutObjectOptions{ContentType:contentType}) + if err != nil { + log.Fatalln(err) + } + + log.Printf("Successfully uploaded %s of size %d\n", objectName, n) +} +``` + +### Run FileUploader +```sh +go run file-uploader.go +2016/08/13 17:03:28 Successfully created mymusic +2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413 + +mc ls play/mymusic/ +[2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip +``` + +## API Reference +The full API Reference is available here. 
+ +* [Complete API Reference](https://docs.minio.io/docs/golang-client-api-reference) + +### API Reference : Bucket Operations +* [`MakeBucket`](https://docs.minio.io/docs/golang-client-api-reference#MakeBucket) +* [`ListBuckets`](https://docs.minio.io/docs/golang-client-api-reference#ListBuckets) +* [`BucketExists`](https://docs.minio.io/docs/golang-client-api-reference#BucketExists) +* [`RemoveBucket`](https://docs.minio.io/docs/golang-client-api-reference#RemoveBucket) +* [`ListObjects`](https://docs.minio.io/docs/golang-client-api-reference#ListObjects) +* [`ListObjectsV2`](https://docs.minio.io/docs/golang-client-api-reference#ListObjectsV2) +* [`ListIncompleteUploads`](https://docs.minio.io/docs/golang-client-api-reference#ListIncompleteUploads) + +### API Reference : Bucket policy Operations +* [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy) +* [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy) + +### API Reference : Bucket notification Operations +* [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification) +* [`GetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketNotification) +* [`RemoveAllBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#RemoveAllBucketNotification) +* [`ListenBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#ListenBucketNotification) (Minio Extension) + +### API Reference : File Object Operations +* [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject) +* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FGetObject) +* [`FPutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FPutObjectWithContext) +* [`FGetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FGetObjectWithContext) + +### API Reference : Object Operations +* [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject) +* [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject) +* [`GetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#GetObjectWithContext) +* [`PutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectWithContext) +* [`PutObjectStreaming`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectStreaming) +* [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject) +* [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject) +* [`RemoveObject`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObject) +* [`RemoveObjects`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObjects) +* [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload) +* [`SelectObjectContent`](https://docs.minio.io/docs/golang-client-api-reference#SelectObjectContent) + + +### API Reference : Presigned Operations +* [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject) +* [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject) +* [`PresignedHeadObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedHeadObject) +* [`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy) + +### API Reference : Client custom settings +* 
[`SetAppInfo`](http://docs.minio.io/docs/golang-client-api-reference#SetAppInfo) +* [`SetCustomTransport`](http://docs.minio.io/docs/golang-client-api-reference#SetCustomTransport) +* [`TraceOn`](http://docs.minio.io/docs/golang-client-api-reference#TraceOn) +* [`TraceOff`](http://docs.minio.io/docs/golang-client-api-reference#TraceOff) + +## Full Examples + +### Full Examples : Bucket Operations +* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go) +* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go) +* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go) +* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go) +* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go) +* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go) +* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go) + +### Full Examples : Bucket policy Operations +* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go) +* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go) +* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go) + +### Full Examples : Bucket lifecycle Operations +* [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go) +* [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go) + +### Full Examples : Bucket notification Operations +* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go) +* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go) +* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go) +* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (Minio Extension) + +### Full Examples : File Object Operations +* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go) +* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go) +* [fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go) +* [fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go) + +### Full Examples : Object Operations +* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go) +* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go) +* [putobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject-context.go) +* [getobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject-context.go) +* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go) +* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go) +* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go) +* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go) +* 
[removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
+
+### Full Examples : Encrypted Object Operations
+* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
+* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
+* [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go)
+
+### Full Examples : Presigned Operations
+* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
+* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
+* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go)
+* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
+
+## Explore Further
+* [Complete Documentation](https://docs.minio.io)
+* [Minio Go Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference)
+* [Go Music Player App Full Application Example](https://docs.minio.io/docs/go-music-player-app)
+
+## Contribute
+[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md)
+
+[![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go)
+[![Build status](https://ci.appveyor.com/api/projects/status/1d05e6nvxcelmrak?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go)
+
+## License
+This SDK is distributed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](./LICENSE) and [NOTICE](./NOTICE) for more information.
diff --git a/vendor/github.com/minio/minio-go/README_zh_CN.md b/vendor/github.com/minio/minio-go/README_zh_CN.md
new file mode 100644
index 0000000..a5acf19
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/README_zh_CN.md
@@ -0,0 +1,245 @@
+# Minio Go SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge)
+
+The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage service.
+
+**Supported cloud storage providers:**
+
+- AWS Signature Version 4
+   - Amazon S3
+   - Minio
+
+- AWS Signature Version 2
+   - Google Cloud Storage (Compatibility Mode)
+   - Openstack Swift + Swift3 middleware
+   - Ceph Object Gateway
+   - Riak CS
+
+This guide shows how to install the Minio client SDK, connect to Minio, and walk through a simple file upload example. For the complete API and more examples, see the [Go Client API Reference](https://docs.minio.io/docs/golang-client-api-reference).
+
+This document assumes you already have a working [Go development environment](https://docs.minio.io/docs/how-to-install-golang).
+
+## Download from Github
+```sh
+go get -u github.com/minio/minio-go
+```
+
+## Initialize Minio Client
+The Minio client requires the following four parameters to connect to an Amazon S3 compatible object storage.
+
+| Parameter | Description |
+| :--- | :--- |
+| endpoint | URL to the object storage service. |
+| accessKeyID | Access key is the user ID that uniquely identifies your account. |
+| secretAccessKey | Secret key is the password to your account. |
+| secure | Set to true to use HTTPS. |
+
+
+```go
+package main
+
+import (
+	"github.com/minio/minio-go"
+	"log"
+)
+
+func main() {
+	endpoint := "play.minio.io:9000"
+	accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+	secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+	useSSL := true
+
+	// Initialize the minio client object.
+	minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	log.Printf("%#v\n", minioClient) // minioClient is now set up
+}
+```
+
+## Quick Start Example - File Uploader
+This example connects to an object storage service, creates a bucket, and uploads a file to the bucket.
+
+This example uses the Minio server running at [https://play.minio.io:9000](https://play.minio.io:9000), which you are free to use for development and testing. The access credentials shown in this example are public.
+
+### FileUploader.go
+```go
+package main
+
+import (
+	"github.com/minio/minio-go"
+	"log"
+)
+
+func main() {
+	endpoint := "play.minio.io:9000"
+	accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+	secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+	useSSL := true
+
+	// Initialize the minio client object.
+	minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	// Make a bucket called mymusic.
+	bucketName := "mymusic"
+	location := "us-east-1"
+
+	err = minioClient.MakeBucket(bucketName, location)
+	if err != nil {
+		// Check whether the bucket already exists.
+		exists, err := minioClient.BucketExists(bucketName)
+		if err == nil && exists {
+			log.Printf("We already own %s\n", bucketName)
+		} else {
+			log.Fatalln(err)
+		}
+	}
+	log.Printf("Successfully created %s\n", bucketName)
+
+	// Upload a zip file.
+	objectName := "golden-oldies.zip"
+	filePath := "/tmp/golden-oldies.zip"
+	contentType := "application/zip"
+
+	// Upload the zip file with FPutObject.
+	n, err := minioClient.FPutObject(bucketName, objectName, filePath, minio.PutObjectOptions{ContentType:contentType})
+	if err != nil {
+		log.Fatalln(err)
+	}
+
+	log.Printf("Successfully uploaded %s of size %d\n", objectName, n)
+}
+```
+
+### Run FileUploader
+```sh
+go run file-uploader.go
+2016/08/13 17:03:28 Successfully created mymusic
+2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413
+
+mc ls play/mymusic/
+[2016-05-27 16:02:16 PDT]  17MiB golden-oldies.zip
+```
+
+## API Reference
+The full API reference is available here.
+* [Complete API Reference](https://docs.minio.io/docs/golang-client-api-reference)
+
+### API Reference : Bucket Operations
+* [`MakeBucket`](https://docs.minio.io/docs/golang-client-api-reference#MakeBucket)
+* [`ListBuckets`](https://docs.minio.io/docs/golang-client-api-reference#ListBuckets)
+* [`BucketExists`](https://docs.minio.io/docs/golang-client-api-reference#BucketExists)
+* [`RemoveBucket`](https://docs.minio.io/docs/golang-client-api-reference#RemoveBucket)
+* [`ListObjects`](https://docs.minio.io/docs/golang-client-api-reference#ListObjects)
+* [`ListObjectsV2`](https://docs.minio.io/docs/golang-client-api-reference#ListObjectsV2)
+* [`ListIncompleteUploads`](https://docs.minio.io/docs/golang-client-api-reference#ListIncompleteUploads)
+
+### API Reference : Bucket policy Operations
+* [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy)
+* [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy)
+
+### API Reference : Bucket notification Operations
+* [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification)
+* [`GetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketNotification)
+* [`RemoveAllBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#RemoveAllBucketNotification)
+* [`ListenBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#ListenBucketNotification) (Minio Extension)
+
+### API Reference : File Object Operations
+* [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
+* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FGetObject)
+* [`FPutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FPutObjectWithContext)
+* [`FGetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FGetObjectWithContext)
+
+### API Reference : Object Operations
+*
[`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject)
+* [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject)
+* [`GetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#GetObjectWithContext)
+* [`PutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectWithContext)
+* [`PutObjectStreaming`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectStreaming)
+* [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject)
+* [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject)
+* [`RemoveObject`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObject)
+* [`RemoveObjects`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObjects)
+* [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload)
+
+### API Reference : Encrypted Object Operations
+* [`GetEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#GetEncryptedObject)
+* [`PutEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#PutEncryptedObject)
+
+### API Reference : Presigned Operations
+* [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject)
+* [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject)
+* [`PresignedHeadObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedHeadObject)
+* [`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy)
+
+### API Reference : Client custom settings
+* [`SetAppInfo`](http://docs.minio.io/docs/golang-client-api-reference#SetAppInfo)
+* [`SetCustomTransport`](http://docs.minio.io/docs/golang-client-api-reference#SetCustomTransport)
+* [`TraceOn`](http://docs.minio.io/docs/golang-client-api-reference#TraceOn)
+* [`TraceOff`](http://docs.minio.io/docs/golang-client-api-reference#TraceOff)
+
+## Full Examples
+
+### Full Examples : Bucket Operations
+* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
+* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
+* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
+* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go)
+* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
+* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
+* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)
+
+### Full Examples : Bucket policy Operations
+* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
+* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
+* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)
+
+### Full Examples : Bucket notification Operations
+* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
+* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
+* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
+* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (Minio Extension)
+
+### Full Examples : File Object Operations
+*
[fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
+* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
+* [fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go)
+* [fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go)
+
+### Full Examples : Object Operations
+* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
+* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
+* [putobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject-context.go)
+* [getobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject-context.go)
+* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
+* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
+* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
+* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
+* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
+
+### Full Examples : Encrypted Object Operations
+* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
+* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
+* [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go)
+
+### Full Examples : Presigned Operations
+* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
+* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
+* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go)
+* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
+
+## Explore Further
+* [Complete Documentation](https://docs.minio.io)
+* [Minio Go Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference)
+* [Go Music Player App Full Application Example](https://docs.minio.io/docs/go-music-player-app)
+
+## Contribute
+[Contributors Guide](https://github.com/minio/minio-go/blob/master/docs/zh_CN/CONTRIBUTING.md)
+
+[![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go)
+[![Build status](https://ci.appveyor.com/api/projects/status/1d05e6nvxcelmrak?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go)

diff --git a/vendor/github.com/minio/minio-go/api-compose-object.go b/vendor/github.com/minio/minio-go/api-compose-object.go
new file mode 100644
index 0000000..3ac36c5
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-compose-object.go
@@ -0,0 +1,565 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017, 2018 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/minio/minio-go/pkg/encrypt"
+	"github.com/minio/minio-go/pkg/s3utils"
+)
+
+// DestinationInfo - type with information about the object to be
+// created via server-side copy requests, using the Compose API.
+type DestinationInfo struct {
+	bucket, object string
+	encryption     encrypt.ServerSide
+
+	// if no user-metadata is provided, it is copied from source
+	// (when there is only one source object in the compose
+	// request)
+	userMetadata map[string]string
+}
+
+// NewDestinationInfo - creates a compose-object/copy-source
+// destination info object.
+//
+// `sse` is the key info for server-side-encryption with customer
+// provided key. If it is nil, no encryption is performed.
+//
+// `userMeta` is the user-metadata key-value pairs to be set on the
+// destination. The keys are automatically prefixed with `x-amz-meta-`
+// if needed. If nil is passed, and if only a single source (of any
+// size) is provided in the ComposeObject call, then metadata from the
+// source is copied to the destination.
+func NewDestinationInfo(bucket, object string, sse encrypt.ServerSide, userMeta map[string]string) (d DestinationInfo, err error) {
+	// Input validation.
+	if err = s3utils.CheckValidBucketName(bucket); err != nil {
+		return d, err
+	}
+	if err = s3utils.CheckValidObjectName(object); err != nil {
+		return d, err
+	}
+
+	// Process custom-metadata to remove a `x-amz-meta-` prefix if
+	// present and validate that keys are distinct (after this
+	// prefix removal).
+	m := make(map[string]string)
+	for k, v := range userMeta {
+		if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
+			k = k[len("x-amz-meta-"):]
+		}
+		if _, ok := m[k]; ok {
+			return d, ErrInvalidArgument(fmt.Sprintf("Cannot add both %s and x-amz-meta-%s keys as custom metadata", k, k))
+		}
+		m[k] = v
+	}
+
+	return DestinationInfo{
+		bucket:       bucket,
+		object:       object,
+		encryption:   sse,
+		userMetadata: m,
+	}, nil
+}
+
+// getUserMetaHeadersMap - construct appropriate key-value pairs to send
+// as headers from metadata map to pass into copy-object request. For
+// single part copy-object (i.e. non-multipart object), enable the
+// withCopyDirectiveHeader to set the `x-amz-metadata-directive` to
+// `REPLACE`, so that metadata headers from the source are not copied
+// over.
+func (d *DestinationInfo) getUserMetaHeadersMap(withCopyDirectiveHeader bool) map[string]string {
+	if len(d.userMetadata) == 0 {
+		return nil
+	}
+	r := make(map[string]string)
+	if withCopyDirectiveHeader {
+		r["x-amz-metadata-directive"] = "REPLACE"
+	}
+	for k, v := range d.userMetadata {
+		if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) {
+			r[k] = v
+		} else {
+			r["x-amz-meta-"+k] = v
+		}
+	}
+	return r
+}
+
+// SourceInfo - represents a source object to be copied, using
+// server-side copying APIs.
+type SourceInfo struct {
+	bucket, object string
+	start, end     int64
+	encryption     encrypt.ServerSide
+	// Headers to send with the upload-part-copy request involving
+	// this source object.
+	Headers http.Header
+}
+
+// NewSourceInfo - create a compose-object/copy-object source info
+// object.
+//
+// `sse` is the decryption key using server-side-encryption
+// with customer provided key. It may be nil if the source is not
+// encrypted.
+func NewSourceInfo(bucket, object string, sse encrypt.ServerSide) SourceInfo { + r := SourceInfo{ + bucket: bucket, + object: object, + start: -1, // range is unspecified by default + encryption: sse, + Headers: make(http.Header), + } + + // Set the source header + r.Headers.Set("x-amz-copy-source", s3utils.EncodePath(bucket+"/"+object)) + return r +} + +// SetRange - Set the start and end offset of the source object to be +// copied. If this method is not called, the whole source object is +// copied. +func (s *SourceInfo) SetRange(start, end int64) error { + if start > end || start < 0 { + return ErrInvalidArgument("start must be non-negative, and start must be at most end.") + } + // Note that 0 <= start <= end + s.start, s.end = start, end + return nil +} + +// SetMatchETagCond - Set ETag match condition. The object is copied +// only if the etag of the source matches the value given here. +func (s *SourceInfo) SetMatchETagCond(etag string) error { + if etag == "" { + return ErrInvalidArgument("ETag cannot be empty.") + } + s.Headers.Set("x-amz-copy-source-if-match", etag) + return nil +} + +// SetMatchETagExceptCond - Set the ETag match exception +// condition. The object is copied only if the etag of the source is +// not the value given here. +func (s *SourceInfo) SetMatchETagExceptCond(etag string) error { + if etag == "" { + return ErrInvalidArgument("ETag cannot be empty.") + } + s.Headers.Set("x-amz-copy-source-if-none-match", etag) + return nil +} + +// SetModifiedSinceCond - Set the modified since condition. +func (s *SourceInfo) SetModifiedSinceCond(modTime time.Time) error { + if modTime.IsZero() { + return ErrInvalidArgument("Input time cannot be 0.") + } + s.Headers.Set("x-amz-copy-source-if-modified-since", modTime.Format(http.TimeFormat)) + return nil +} + +// SetUnmodifiedSinceCond - Set the unmodified since condition. +func (s *SourceInfo) SetUnmodifiedSinceCond(modTime time.Time) error { + if modTime.IsZero() { + return ErrInvalidArgument("Input time cannot be 0.") + } + s.Headers.Set("x-amz-copy-source-if-unmodified-since", modTime.Format(http.TimeFormat)) + return nil +} + +// Helper to fetch size and etag of an object using a StatObject call. +func (s *SourceInfo) getProps(c Client) (size int64, etag string, userMeta map[string]string, err error) { + // Get object info - need size and etag here. Also, decryption + // headers are added to the stat request if given. + var objInfo ObjectInfo + opts := StatObjectOptions{GetObjectOptions{ServerSideEncryption: encrypt.SSE(s.encryption)}} + objInfo, err = c.statObject(context.Background(), s.bucket, s.object, opts) + if err != nil { + err = ErrInvalidArgument(fmt.Sprintf("Could not stat object - %s/%s: %v", s.bucket, s.object, err)) + } else { + size = objInfo.Size + etag = objInfo.ETag + userMeta = make(map[string]string) + for k, v := range objInfo.Metadata { + if strings.HasPrefix(k, "x-amz-meta-") { + if len(v) > 0 { + userMeta[k] = v[0] + } + } + } + } + return +} + +// Low level implementation of CopyObject API, supports only upto 5GiB worth of copy. +func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, + metadata map[string]string) (ObjectInfo, error) { + + // Build headers. + headers := make(http.Header) + + // Set all the metadata headers. 
+ for k, v := range metadata { + headers.Set(k, v) + } + + // Set the source header + headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)) + + // Send upload-part-copy request + resp, err := c.executeMethod(ctx, "PUT", requestMetadata{ + bucketName: destBucket, + objectName: destObject, + customHeader: headers, + }) + defer closeResponse(resp) + if err != nil { + return ObjectInfo{}, err + } + + // Check if we got an error response. + if resp.StatusCode != http.StatusOK { + return ObjectInfo{}, httpRespToErrorResponse(resp, srcBucket, srcObject) + } + + cpObjRes := copyObjectResult{} + err = xmlDecoder(resp.Body, &cpObjRes) + if err != nil { + return ObjectInfo{}, err + } + + objInfo := ObjectInfo{ + Key: destObject, + ETag: strings.Trim(cpObjRes.ETag, "\""), + LastModified: cpObjRes.LastModified, + } + return objInfo, nil +} + +func (c Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, + partID int, startOffset int64, length int64, metadata map[string]string) (p CompletePart, err error) { + + headers := make(http.Header) + + // Set source + headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject)) + + if startOffset < 0 { + return p, ErrInvalidArgument("startOffset must be non-negative") + } + + if length >= 0 { + headers.Set("x-amz-copy-source-range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1)) + } + + for k, v := range metadata { + headers.Set(k, v) + } + + queryValues := make(url.Values) + queryValues.Set("partNumber", strconv.Itoa(partID)) + queryValues.Set("uploadId", uploadID) + + resp, err := c.executeMethod(ctx, "PUT", requestMetadata{ + bucketName: destBucket, + objectName: destObject, + customHeader: headers, + queryValues: queryValues, + }) + defer closeResponse(resp) + if err != nil { + return + } + + // Check if we got an error response. + if resp.StatusCode != http.StatusOK { + return p, httpRespToErrorResponse(resp, destBucket, destObject) + } + + // Decode copy-part response on success. + cpObjRes := copyObjectResult{} + err = xmlDecoder(resp.Body, &cpObjRes) + if err != nil { + return p, err + } + p.PartNumber, p.ETag = partID, cpObjRes.ETag + return p, nil +} + +// uploadPartCopy - helper function to create a part in a multipart +// upload via an upload-part-copy request +// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html +func (c Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int, + headers http.Header) (p CompletePart, err error) { + + // Build query parameters + urlValues := make(url.Values) + urlValues.Set("partNumber", strconv.Itoa(partNumber)) + urlValues.Set("uploadId", uploadID) + + // Send upload-part-copy request + resp, err := c.executeMethod(ctx, "PUT", requestMetadata{ + bucketName: bucket, + objectName: object, + customHeader: headers, + queryValues: urlValues, + }) + defer closeResponse(resp) + if err != nil { + return p, err + } + + // Check if we got an error response. + if resp.StatusCode != http.StatusOK { + return p, httpRespToErrorResponse(resp, bucket, object) + } + + // Decode copy-part response on success. + cpObjRes := copyObjectResult{} + err = xmlDecoder(resp.Body, &cpObjRes) + if err != nil { + return p, err + } + p.PartNumber, p.ETag = partNumber, cpObjRes.ETag + return p, nil +} + +// ComposeObjectWithProgress - creates an object using server-side copying of +// existing objects. 
It takes a list of source objects (with optional +// offsets) and concatenates them into a new object using only +// server-side copying operations. Optionally takes progress reader hook +// for applications to look at current progress. +func (c Client) ComposeObjectWithProgress(dst DestinationInfo, srcs []SourceInfo, progress io.Reader) error { + if len(srcs) < 1 || len(srcs) > maxPartsCount { + return ErrInvalidArgument("There must be as least one and up to 10000 source objects.") + } + ctx := context.Background() + srcSizes := make([]int64, len(srcs)) + var totalSize, size, totalParts int64 + var srcUserMeta map[string]string + etags := make([]string, len(srcs)) + var err error + for i, src := range srcs { + size, etags[i], srcUserMeta, err = src.getProps(c) + if err != nil { + return err + } + + // Error out if client side encryption is used in this source object when + // more than one source objects are given. + if len(srcs) > 1 && src.Headers.Get("x-amz-meta-x-amz-key") != "" { + return ErrInvalidArgument( + fmt.Sprintf("Client side encryption is used in source object %s/%s", src.bucket, src.object)) + } + + // Check if a segment is specified, and if so, is the + // segment within object bounds? + if src.start != -1 { + // Since range is specified, + // 0 <= src.start <= src.end + // so only invalid case to check is: + if src.end >= size { + return ErrInvalidArgument( + fmt.Sprintf("SourceInfo %d has invalid segment-to-copy [%d, %d] (size is %d)", + i, src.start, src.end, size)) + } + size = src.end - src.start + 1 + } + + // Only the last source may be less than `absMinPartSize` + if size < absMinPartSize && i < len(srcs)-1 { + return ErrInvalidArgument( + fmt.Sprintf("SourceInfo %d is too small (%d) and it is not the last part", i, size)) + } + + // Is data to copy too large? + totalSize += size + if totalSize > maxMultipartPutObjectSize { + return ErrInvalidArgument(fmt.Sprintf("Cannot compose an object of size %d (> 5TiB)", totalSize)) + } + + // record source size + srcSizes[i] = size + + // calculate parts needed for current source + totalParts += partsRequired(size) + // Do we need more parts than we are allowed? + if totalParts > maxPartsCount { + return ErrInvalidArgument(fmt.Sprintf( + "Your proposed compose object requires more than %d parts", maxPartsCount)) + } + } + + // Single source object case (i.e. when only one source is + // involved, it is being copied wholly and at most 5GiB in + // size, emptyfiles are also supported). + if (totalParts == 1 && srcs[0].start == -1 && totalSize <= maxPartSize) || (totalSize == 0) { + return c.CopyObjectWithProgress(dst, srcs[0], progress) + } + + // Now, handle multipart-copy cases. + + // 1. Ensure that the object has not been changed while + // we are copying data. + for i, src := range srcs { + if src.Headers.Get("x-amz-copy-source-if-match") == "" { + src.SetMatchETagCond(etags[i]) + } + } + + // 2. Initiate a new multipart upload. + + // Set user-metadata on the destination object. If no + // user-metadata is specified, and there is only one source, + // (only) then metadata from source is copied. + userMeta := dst.getUserMetaHeadersMap(false) + metaMap := userMeta + if len(userMeta) == 0 && len(srcs) == 1 { + metaMap = srcUserMeta + } + metaHeaders := make(map[string]string) + for k, v := range metaMap { + metaHeaders[k] = v + } + + uploadID, err := c.newUploadID(ctx, dst.bucket, dst.object, PutObjectOptions{ServerSideEncryption: dst.encryption, UserMetadata: metaHeaders}) + if err != nil { + return err + } + + // 3. 
Perform copy part uploads + objParts := []CompletePart{} + partIndex := 1 + for i, src := range srcs { + h := src.Headers + if src.encryption != nil { + encrypt.SSECopy(src.encryption).Marshal(h) + } + // Add destination encryption headers + if dst.encryption != nil { + dst.encryption.Marshal(h) + } + + // calculate start/end indices of parts after + // splitting. + startIdx, endIdx := calculateEvenSplits(srcSizes[i], src) + for j, start := range startIdx { + end := endIdx[j] + + // Add (or reset) source range header for + // upload part copy request. + h.Set("x-amz-copy-source-range", + fmt.Sprintf("bytes=%d-%d", start, end)) + + // make upload-part-copy request + complPart, err := c.uploadPartCopy(ctx, dst.bucket, + dst.object, uploadID, partIndex, h) + if err != nil { + return err + } + if progress != nil { + io.CopyN(ioutil.Discard, progress, end-start+1) + } + objParts = append(objParts, complPart) + partIndex++ + } + } + + // 4. Make final complete-multipart request. + _, err = c.completeMultipartUpload(ctx, dst.bucket, dst.object, uploadID, + completeMultipartUpload{Parts: objParts}) + if err != nil { + return err + } + return nil +} + +// ComposeObject - creates an object using server-side copying of +// existing objects. It takes a list of source objects (with optional +// offsets) and concatenates them into a new object using only +// server-side copying operations. +func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error { + return c.ComposeObjectWithProgress(dst, srcs, nil) +} + +// partsRequired is maximum parts possible with +// max part size of ceiling(maxMultipartPutObjectSize / (maxPartsCount - 1)) +func partsRequired(size int64) int64 { + maxPartSize := maxMultipartPutObjectSize / (maxPartsCount - 1) + r := size / int64(maxPartSize) + if size%int64(maxPartSize) > 0 { + r++ + } + return r +} + +// calculateEvenSplits - computes splits for a source and returns +// start and end index slices. Splits happen evenly to be sure that no +// part is less than 5MiB, as that could fail the multipart request if +// it is not the last part. +func calculateEvenSplits(size int64, src SourceInfo) (startIndex, endIndex []int64) { + if size == 0 { + return + } + + reqParts := partsRequired(size) + startIndex = make([]int64, reqParts) + endIndex = make([]int64, reqParts) + // Compute number of required parts `k`, as: + // + // k = ceiling(size / copyPartSize) + // + // Now, distribute the `size` bytes in the source into + // k parts as evenly as possible: + // + // r parts sized (q+1) bytes, and + // (k - r) parts sized q bytes, where + // + // size = q * k + r (by simple division of size by k, + // so that 0 <= r < k) + // + start := src.start + if start == -1 { + start = 0 + } + quot, rem := size/reqParts, size%reqParts + nextStart := start + for j := int64(0); j < reqParts; j++ { + curPartSize := quot + if j < rem { + curPartSize++ + } + + cStart := nextStart + cEnd := cStart + curPartSize - 1 + nextStart = cEnd + 1 + + startIndex[j], endIndex[j] = cStart, cEnd + } + return +} diff --git a/vendor/github.com/minio/minio-go/api-datatypes.go b/vendor/github.com/minio/minio-go/api-datatypes.go new file mode 100644 index 0000000..63fc089 --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-datatypes.go @@ -0,0 +1,84 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "net/http" + "time" +) + +// BucketInfo container for bucket metadata. +type BucketInfo struct { + // The name of the bucket. + Name string `json:"name"` + // Date the bucket was created. + CreationDate time.Time `json:"creationDate"` +} + +// ObjectInfo container for object metadata. +type ObjectInfo struct { + // An ETag is optionally set to md5sum of an object. In case of multipart objects, + // ETag is of the form MD5SUM-N where MD5SUM is md5sum of all individual md5sums of + // each parts concatenated into one string. + ETag string `json:"etag"` + + Key string `json:"name"` // Name of the object + LastModified time.Time `json:"lastModified"` // Date and time the object was last modified. + Size int64 `json:"size"` // Size in bytes of the object. + ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data. + + // Collection of additional metadata on the object. + // eg: x-amz-meta-*, content-encoding etc. + Metadata http.Header `json:"metadata" xml:"-"` + + // Owner name. + Owner struct { + DisplayName string `json:"name"` + ID string `json:"id"` + } `json:"owner"` + + // The class of storage used to store the object. + StorageClass string `json:"storageClass"` + + // Error + Err error `json:"-"` +} + +// ObjectMultipartInfo container for multipart object metadata. +type ObjectMultipartInfo struct { + // Date and time at which the multipart upload was initiated. + Initiated time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Initiator initiator + Owner owner + + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass string + + // Key of the object for which the multipart upload was initiated. + Key string + + // Size in bytes of the object. + Size int64 + + // Upload ID that identifies the multipart upload. + UploadID string `xml:"UploadId"` + + // Error + Err error +} diff --git a/vendor/github.com/minio/minio-go/api-error-response.go b/vendor/github.com/minio/minio-go/api-error-response.go new file mode 100644 index 0000000..0170b8d --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-error-response.go @@ -0,0 +1,282 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package minio + +import ( + "encoding/xml" + "fmt" + "net/http" +) + +/* **** SAMPLE ERROR RESPONSE **** + + + AccessDenied + Access Denied + bucketName + objectName + F19772218238A85A + GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD + +*/ + +// ErrorResponse - Is the typed error returned by all API operations. +// ErrorResponse struct should be comparable since it is compared inside +// golang http API (https://github.com/golang/go/issues/29768) +type ErrorResponse struct { + XMLName xml.Name `xml:"Error" json:"-"` + Code string + Message string + BucketName string + Key string + RequestID string `xml:"RequestId"` + HostID string `xml:"HostId"` + + // Region where the bucket is located. This header is returned + // only in HEAD bucket and ListObjects response. + Region string + + // Underlying HTTP status code for the returned error + StatusCode int `xml:"-" json:"-"` +} + +// ToErrorResponse - Returns parsed ErrorResponse struct from body and +// http headers. +// +// For example: +// +// import s3 "github.com/minio/minio-go" +// ... +// ... +// reader, stat, err := s3.GetObject(...) +// if err != nil { +// resp := s3.ToErrorResponse(err) +// } +// ... +func ToErrorResponse(err error) ErrorResponse { + switch err := err.(type) { + case ErrorResponse: + return err + default: + return ErrorResponse{} + } +} + +// Error - Returns S3 error string. +func (e ErrorResponse) Error() string { + if e.Message == "" { + msg, ok := s3ErrorResponseMap[e.Code] + if !ok { + msg = fmt.Sprintf("Error response code %s.", e.Code) + } + return msg + } + return e.Message +} + +// Common string for errors to report issue location in unexpected +// cases. +const ( + reportIssue = "Please report this issue at https://github.com/minio/minio-go/issues." +) + +// httpRespToErrorResponse returns a new encoded ErrorResponse +// structure as error. +func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) error { + if resp == nil { + msg := "Response is empty. " + reportIssue + return ErrInvalidArgument(msg) + } + + errResp := ErrorResponse{ + StatusCode: resp.StatusCode, + } + + err := xmlDecoder(resp.Body, &errResp) + // Xml decoding failed with no body, fall back to HTTP headers. + if err != nil { + switch resp.StatusCode { + case http.StatusNotFound: + if objectName == "" { + errResp = ErrorResponse{ + StatusCode: resp.StatusCode, + Code: "NoSuchBucket", + Message: "The specified bucket does not exist.", + BucketName: bucketName, + } + } else { + errResp = ErrorResponse{ + StatusCode: resp.StatusCode, + Code: "NoSuchKey", + Message: "The specified key does not exist.", + BucketName: bucketName, + Key: objectName, + } + } + case http.StatusForbidden: + errResp = ErrorResponse{ + StatusCode: resp.StatusCode, + Code: "AccessDenied", + Message: "Access Denied.", + BucketName: bucketName, + Key: objectName, + } + case http.StatusConflict: + errResp = ErrorResponse{ + StatusCode: resp.StatusCode, + Code: "Conflict", + Message: "Bucket not empty.", + BucketName: bucketName, + } + case http.StatusPreconditionFailed: + errResp = ErrorResponse{ + StatusCode: resp.StatusCode, + Code: "PreconditionFailed", + Message: s3ErrorResponseMap["PreconditionFailed"], + BucketName: bucketName, + Key: objectName, + } + default: + errResp = ErrorResponse{ + StatusCode: resp.StatusCode, + Code: resp.Status, + Message: resp.Status, + BucketName: bucketName, + } + } + } + + // Save hostID, requestID and region information + // from headers if not available through error XML. 
+ if errResp.RequestID == "" { + errResp.RequestID = resp.Header.Get("x-amz-request-id") + } + if errResp.HostID == "" { + errResp.HostID = resp.Header.Get("x-amz-id-2") + } + if errResp.Region == "" { + errResp.Region = resp.Header.Get("x-amz-bucket-region") + } + if errResp.Code == "InvalidRegion" && errResp.Region != "" { + errResp.Message = fmt.Sprintf("Region does not match, expecting region ‘%s’.", errResp.Region) + } + + return errResp +} + +// ErrTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration. +func ErrTransferAccelerationBucket(bucketName string) error { + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "InvalidArgument", + Message: "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’.", + BucketName: bucketName, + } +} + +// ErrEntityTooLarge - Input size is larger than supported maximum. +func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error { + msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize) + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "EntityTooLarge", + Message: msg, + BucketName: bucketName, + Key: objectName, + } +} + +// ErrEntityTooSmall - Input size is smaller than supported minimum. +func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error { + msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", totalSize) + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "EntityTooSmall", + Message: msg, + BucketName: bucketName, + Key: objectName, + } +} + +// ErrUnexpectedEOF - Unexpected end of file reached. +func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error { + msg := fmt.Sprintf("Data read ‘%d’ is not equal to the size ‘%d’ of the input Reader.", totalRead, totalSize) + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "UnexpectedEOF", + Message: msg, + BucketName: bucketName, + Key: objectName, + } +} + +// ErrInvalidBucketName - Invalid bucket name response. +func ErrInvalidBucketName(message string) error { + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "InvalidBucketName", + Message: message, + RequestID: "minio", + } +} + +// ErrInvalidObjectName - Invalid object name response. +func ErrInvalidObjectName(message string) error { + return ErrorResponse{ + StatusCode: http.StatusNotFound, + Code: "NoSuchKey", + Message: message, + RequestID: "minio", + } +} + +// ErrInvalidObjectPrefix - Invalid object prefix response is +// similar to object name response. +var ErrInvalidObjectPrefix = ErrInvalidObjectName + +// ErrInvalidArgument - Invalid argument response. +func ErrInvalidArgument(message string) error { + return ErrorResponse{ + StatusCode: http.StatusBadRequest, + Code: "InvalidArgument", + Message: message, + RequestID: "minio", + } +} + +// ErrNoSuchBucketPolicy - No Such Bucket Policy response +// The specified bucket does not have a bucket policy. 
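A short usage sketch of the typed-error pattern this file defines: any error returned by the package can be converted back to an `ErrorResponse` (the zero value if it was some other error type) and branched on by S3 error code. `inspectError` is illustrative; the codes match the fallbacks built in `httpRespToErrorResponse` above.

```go
package example

import (
	"fmt"

	minio "github.com/minio/minio-go"
)

// inspectError branches on the S3 error code carried by a typed error.
func inspectError(err error) {
	resp := minio.ToErrorResponse(err)
	switch resp.Code {
	case "NoSuchKey":
		fmt.Println("object does not exist:", resp.Key)
	case "NoSuchBucket":
		fmt.Println("bucket does not exist:", resp.BucketName)
	case "AccessDenied":
		fmt.Printf("access denied (HTTP %d)\n", resp.StatusCode)
	default:
		fmt.Println("other error:", err)
	}
}
```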
+func ErrNoSuchBucketPolicy(message string) error { + return ErrorResponse{ + StatusCode: http.StatusNotFound, + Code: "NoSuchBucketPolicy", + Message: message, + RequestID: "minio", + } +} + +// ErrAPINotSupported - API not supported response +// The specified API call is not supported +func ErrAPINotSupported(message string) error { + return ErrorResponse{ + StatusCode: http.StatusNotImplemented, + Code: "APINotSupported", + Message: message, + RequestID: "minio", + } +} diff --git a/vendor/github.com/minio/minio-go/api-get-lifecycle.go b/vendor/github.com/minio/minio-go/api-get-lifecycle.go new file mode 100644 index 0000000..8097bfc --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-get-lifecycle.go @@ -0,0 +1,77 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "io/ioutil" + "net/http" + "net/url" + + "github.com/minio/minio-go/pkg/s3utils" +) + +// GetBucketLifecycle - get bucket lifecycle. +func (c Client) GetBucketLifecycle(bucketName string) (string, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return "", err + } + bucketLifecycle, err := c.getBucketLifecycle(bucketName) + if err != nil { + errResponse := ToErrorResponse(err) + if errResponse.Code == "NoSuchLifecycleConfiguration" { + return "", nil + } + return "", err + } + return bucketLifecycle, nil +} + +// Request server for current bucket lifecycle. +func (c Client) getBucketLifecycle(bucketName string) (string, error) { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("lifecycle", "") + + // Execute GET on bucket to get lifecycle. + resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return "", err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return "", httpRespToErrorResponse(resp, bucketName, "") + } + } + + bucketLifecycleBuf, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + + lifecycle := string(bucketLifecycleBuf) + return lifecycle, err +} diff --git a/vendor/github.com/minio/minio-go/api-get-object-acl.go b/vendor/github.com/minio/minio-go/api-get-object-acl.go new file mode 100644 index 0000000..af5544d --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-get-object-acl.go @@ -0,0 +1,136 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "net/http" + "net/url" +) + +type accessControlPolicy struct { + Owner struct { + ID string `xml:"ID"` + DisplayName string `xml:"DisplayName"` + } `xml:"Owner"` + AccessControlList struct { + Grant []struct { + Grantee struct { + ID string `xml:"ID"` + DisplayName string `xml:"DisplayName"` + URI string `xml:"URI"` + } `xml:"Grantee"` + Permission string `xml:"Permission"` + } `xml:"Grant"` + } `xml:"AccessControlList"` +} + +//GetObjectACL get object ACLs +func (c Client) GetObjectACL(bucketName, objectName string) (*ObjectInfo, error) { + + resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: url.Values{ + "acl": []string{""}, + }, + }) + if err != nil { + return nil, err + } + defer closeResponse(resp) + + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, objectName) + } + + res := &accessControlPolicy{} + + if err := xmlDecoder(resp.Body, res); err != nil { + return nil, err + } + + objInfo, err := c.statObject(context.Background(), bucketName, objectName, StatObjectOptions{}) + if err != nil { + return nil, err + } + + cannedACL := getCannedACL(res) + if cannedACL != "" { + objInfo.Metadata.Add("X-Amz-Acl", cannedACL) + return &objInfo, nil + } + + grantACL := getAmzGrantACL(res) + for k, v := range grantACL { + objInfo.Metadata[k] = v + } + + return &objInfo, nil +} + +func getCannedACL(aCPolicy *accessControlPolicy) string { + grants := aCPolicy.AccessControlList.Grant + + switch { + case len(grants) == 1: + if grants[0].Grantee.URI == "" && grants[0].Permission == "FULL_CONTROL" { + return "private" + } + case len(grants) == 2: + for _, g := range grants { + if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" { + return "authenticated-read" + } + if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" { + return "public-read" + } + if g.Permission == "READ" && g.Grantee.ID == aCPolicy.Owner.ID { + return "bucket-owner-read" + } + } + case len(grants) == 3: + for _, g := range grants { + if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" { + return "public-read-write" + } + } + } + return "" +} + +func getAmzGrantACL(aCPolicy *accessControlPolicy) map[string][]string { + grants := aCPolicy.AccessControlList.Grant + res := map[string][]string{} + + for _, g := range grants { + switch { + case g.Permission == "READ": + res["X-Amz-Grant-Read"] = append(res["X-Amz-Grant-Read"], "id="+g.Grantee.ID) + case g.Permission == "WRITE": + res["X-Amz-Grant-Write"] = append(res["X-Amz-Grant-Write"], "id="+g.Grantee.ID) + case g.Permission == "READ_ACP": + res["X-Amz-Grant-Read-Acp"] = append(res["X-Amz-Grant-Read-Acp"], "id="+g.Grantee.ID) + case g.Permission == "WRITE_ACP": + res["X-Amz-Grant-Write-Acp"] = append(res["X-Amz-Grant-Write-Acp"], "id="+g.Grantee.ID) + case g.Permission == "FULL_CONTROL": + res["X-Amz-Grant-Full-Control"] = 
append(res["X-Amz-Grant-Full-Control"], "id="+g.Grantee.ID) + } + } + return res +} diff --git a/vendor/github.com/minio/minio-go/api-get-object-context.go b/vendor/github.com/minio/minio-go/api-get-object-context.go new file mode 100644 index 0000000..f8dfac7 --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-get-object-context.go @@ -0,0 +1,26 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import "context" + +// GetObjectWithContext - returns an seekable, readable object. +// The options can be used to specify the GET request further. +func (c Client) GetObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) { + return c.getObjectWithContext(ctx, bucketName, objectName, opts) +} diff --git a/vendor/github.com/minio/minio-go/api-get-object-file.go b/vendor/github.com/minio/minio-go/api-get-object-file.go new file mode 100644 index 0000000..a852220 --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-get-object-file.go @@ -0,0 +1,125 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "io" + "os" + "path/filepath" + + "github.com/minio/minio-go/pkg/s3utils" +) + +// FGetObjectWithContext - download contents of an object to a local file. +// The options can be used to specify the GET request further. +func (c Client) FGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error { + return c.fGetObjectWithContext(ctx, bucketName, objectName, filePath, opts) +} + +// FGetObject - download contents of an object to a local file. +func (c Client) FGetObject(bucketName, objectName, filePath string, opts GetObjectOptions) error { + return c.fGetObjectWithContext(context.Background(), bucketName, objectName, filePath, opts) +} + +// fGetObjectWithContext - fgetObject wrapper function with context +func (c Client) fGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return err + } + + // Verify if destination already exists. 
+ st, err := os.Stat(filePath) + if err == nil { + // If the destination exists and is a directory. + if st.IsDir() { + return ErrInvalidArgument("fileName is a directory.") + } + } + + // Proceed if file does not exist. return for all other errors. + if err != nil { + if !os.IsNotExist(err) { + return err + } + } + + // Extract top level directory. + objectDir, _ := filepath.Split(filePath) + if objectDir != "" { + // Create any missing top level directories. + if err := os.MkdirAll(objectDir, 0700); err != nil { + return err + } + } + + // Gather md5sum. + objectStat, err := c.StatObject(bucketName, objectName, StatObjectOptions{opts}) + if err != nil { + return err + } + + // Write to a temporary file "fileName.part.minio" before saving. + filePartPath := filePath + objectStat.ETag + ".part.minio" + + // If exists, open in append mode. If not create it as a part file. + filePart, err := os.OpenFile(filePartPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600) + if err != nil { + return err + } + + // Issue Stat to get the current offset. + st, err = filePart.Stat() + if err != nil { + return err + } + + // Initialize get object request headers to set the + // appropriate range offsets to read from. + if st.Size() > 0 { + opts.SetRange(st.Size(), 0) + } + + // Seek to current position for incoming reader. + objectReader, objectStat, err := c.getObject(ctx, bucketName, objectName, opts) + if err != nil { + return err + } + + // Write to the part file. + if _, err = io.CopyN(filePart, objectReader, objectStat.Size); err != nil { + return err + } + + // Close the file before rename, this is specifically needed for Windows users. + if err = filePart.Close(); err != nil { + return err + } + + // Safely completed. Now commit by renaming to actual filename. + if err = os.Rename(filePartPath, filePath); err != nil { + return err + } + + // Return. + return nil +} diff --git a/vendor/github.com/minio/minio-go/api-get-object.go b/vendor/github.com/minio/minio-go/api-get-object.go new file mode 100644 index 0000000..0bf556e --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-get-object.go @@ -0,0 +1,659 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "strings" + "sync" + "time" + + "github.com/minio/minio-go/pkg/s3utils" +) + +// GetObject - returns an seekable, readable object. +func (c Client) GetObject(bucketName, objectName string, opts GetObjectOptions) (*Object, error) { + return c.getObjectWithContext(context.Background(), bucketName, objectName, opts) +} + +// GetObject wrapper function that accepts a request context +func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) { + // Input validation. 
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return nil, err + } + + var httpReader io.ReadCloser + var objectInfo ObjectInfo + var err error + + // Create request channel. + reqCh := make(chan getRequest) + // Create response channel. + resCh := make(chan getResponse) + // Create done channel. + doneCh := make(chan struct{}) + + // This routine feeds partial object data as and when the caller reads. + go func() { + defer close(reqCh) + defer close(resCh) + + // Used to verify if etag of object has changed since last read. + var etag string + + // Loop through the incoming control messages and read data. + for { + select { + // When the done channel is closed exit our routine. + case <-doneCh: + // Close the http response body before returning. + // This ends the connection with the server. + if httpReader != nil { + httpReader.Close() + } + return + + // Gather incoming request. + case req := <-reqCh: + // If this is the first request we may not need to do a getObject request yet. + if req.isFirstReq { + // First request is a Read/ReadAt. + if req.isReadOp { + // Differentiate between wanting the whole object and just a range. + if req.isReadAt { + // If this is a ReadAt request only get the specified range. + // Range is set with respect to the offset and length of the buffer requested. + // Do not set objectInfo from the first readAt request because it will not get + // the whole object. + opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) + } else if req.Offset > 0 { + opts.SetRange(req.Offset, 0) + } + httpReader, objectInfo, err = c.getObject(ctx, bucketName, objectName, opts) + if err != nil { + resCh <- getResponse{Error: err} + return + } + etag = objectInfo.ETag + // Read at least firstReq.Buffer bytes, if not we have + // reached our EOF. + size, err := io.ReadFull(httpReader, req.Buffer) + if size > 0 && err == io.ErrUnexpectedEOF { + // If an EOF happens after reading some but not + // all the bytes ReadFull returns ErrUnexpectedEOF + err = io.EOF + } + // Send back the first response. + resCh <- getResponse{ + objectInfo: objectInfo, + Size: int(size), + Error: err, + didRead: true, + } + } else { + // First request is a Stat or Seek call. + // Only need to run a StatObject until an actual Read or ReadAt request comes through. + + // Remove range header if already set, for stat Operations to get original file size. + delete(opts.headers, "Range") + objectInfo, err = c.statObject(ctx, bucketName, objectName, StatObjectOptions{opts}) + if err != nil { + resCh <- getResponse{ + Error: err, + } + // Exit the go-routine. + return + } + etag = objectInfo.ETag + // Send back the first response. + resCh <- getResponse{ + objectInfo: objectInfo, + } + } + } else if req.settingObjectInfo { // Request is just to get objectInfo. + // Remove range header if already set, for stat Operations to get original file size. + delete(opts.headers, "Range") + if etag != "" { + opts.SetMatchETag(etag) + } + objectInfo, err := c.statObject(ctx, bucketName, objectName, StatObjectOptions{opts}) + if err != nil { + resCh <- getResponse{ + Error: err, + } + // Exit the goroutine. + return + } + // Send back the objectInfo. + resCh <- getResponse{ + objectInfo: objectInfo, + } + } else { + // Offset changes fetch the new object at an Offset. 
+ // Because the httpReader may not be set by the first + // request if it was a stat or seek it must be checked + // if the object has been read or not to only initialize + // new ones when they haven't been already. + // All readAt requests are new requests. + if req.DidOffsetChange || !req.beenRead { + if etag != "" { + opts.SetMatchETag(etag) + } + if httpReader != nil { + // Close previously opened http reader. + httpReader.Close() + } + // If this request is a readAt only get the specified range. + if req.isReadAt { + // Range is set with respect to the offset and length of the buffer requested. + opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) + } else if req.Offset > 0 { // Range is set with respect to the offset. + opts.SetRange(req.Offset, 0) + } + httpReader, objectInfo, err = c.getObject(ctx, bucketName, objectName, opts) + if err != nil { + resCh <- getResponse{ + Error: err, + } + return + } + } + + // Read at least req.Buffer bytes, if not we have + // reached our EOF. + size, err := io.ReadFull(httpReader, req.Buffer) + if err == io.ErrUnexpectedEOF { + // If an EOF happens after reading some but not + // all the bytes ReadFull returns ErrUnexpectedEOF + err = io.EOF + } + // Reply back how much was read. + resCh <- getResponse{ + Size: int(size), + Error: err, + didRead: true, + objectInfo: objectInfo, + } + } + } + } + }() + + // Create a newObject through the information sent back by reqCh. + return newObject(reqCh, resCh, doneCh), nil +} + +// get request message container to communicate with internal +// go-routine. +type getRequest struct { + Buffer []byte + Offset int64 // readAt offset. + DidOffsetChange bool // Tracks the offset changes for Seek requests. + beenRead bool // Determines if this is the first time an object is being read. + isReadAt bool // Determines if this request is a request to a specific range + isReadOp bool // Determines if this request is a Read or Read/At request. + isFirstReq bool // Determines if this request is the first time an object is being accessed. + settingObjectInfo bool // Determines if this request is to set the objectInfo of an object. +} + +// get response message container to reply back for the request. +type getResponse struct { + Size int + Error error + didRead bool // Lets subsequent calls know whether or not httpReader has been initiated. + objectInfo ObjectInfo // Used for the first request. +} + +// Object represents an open object. It implements +// Reader, ReaderAt, Seeker, Closer for a HTTP stream. +type Object struct { + // Mutex. + mutex *sync.Mutex + + // User allocated and defined. + reqCh chan<- getRequest + resCh <-chan getResponse + doneCh chan<- struct{} + currOffset int64 + objectInfo ObjectInfo + + // Ask lower level to initiate data fetching based on currOffset + seekData bool + + // Keeps track of closed call. + isClosed bool + + // Keeps track of if this is the first call. + isStarted bool + + // Previous error saved for future calls. + prevErr error + + // Keeps track of if this object has been read yet. + beenRead bool + + // Keeps track of if objectInfo has been set yet. + objectInfoSet bool +} + +// doGetRequest - sends and blocks on the firstReqCh and reqCh of an object. +// Returns back the size of the buffer read, if anything was read, as well +// as any error encountered. For all first requests sent on the object +// it is also responsible for sending back the objectInfo. 
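A sketch of the lazy-fetch contract described above: `GetObject` returns without network I/O, and the first `Read`/`ReadAt`/`Seek`/`Stat` on the returned `*Object` drives the goroutine that issues the actual request. `streamObject` and the bucket/object names are illustrative; a configured `*minio.Client` is assumed.

```go
package example

import (
	"io"
	"os"

	minio "github.com/minio/minio-go"
)

// streamObject skips a 16-byte header and copies the rest to stdout.
func streamObject(c *minio.Client) error {
	obj, err := c.GetObject("mybucket", "myobject", minio.GetObjectOptions{})
	if err != nil {
		return err // only argument validation can fail here
	}
	defer obj.Close()

	// The first request is a Seek, which triggers a StatObject ...
	if _, err := obj.Seek(16, io.SeekStart); err != nil {
		return err
	}
	// ... and the copy below performs the single ranged GET from offset 16.
	_, err = io.Copy(os.Stdout, obj)
	return err
}
```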
+func (o *Object) doGetRequest(request getRequest) (getResponse, error) { + o.reqCh <- request + response := <-o.resCh + + // Return any error to the top level. + if response.Error != nil { + return response, response.Error + } + + // This was the first request. + if !o.isStarted { + // The object has been operated on. + o.isStarted = true + } + // Set the objectInfo if the request was not readAt + // and it hasn't been set before. + if !o.objectInfoSet && !request.isReadAt { + o.objectInfo = response.objectInfo + o.objectInfoSet = true + } + // Set beenRead only if it has not been set before. + if !o.beenRead { + o.beenRead = response.didRead + } + // Data are ready on the wire, no need to reinitiate connection in lower level + o.seekData = false + + return response, nil +} + +// setOffset - handles the setting of offsets for +// Read/ReadAt/Seek requests. +func (o *Object) setOffset(bytesRead int64) error { + // Update the currentOffset. + o.currOffset += bytesRead + + if o.objectInfo.Size > -1 && o.currOffset >= o.objectInfo.Size { + return io.EOF + } + return nil +} + +// Read reads up to len(b) bytes into b. It returns the number of +// bytes read (0 <= n <= len(b)) and any error encountered. Returns +// io.EOF upon end of file. +func (o *Object) Read(b []byte) (n int, err error) { + if o == nil { + return 0, ErrInvalidArgument("Object is nil") + } + + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + // prevErr is previous error saved from previous operation. + if o.prevErr != nil || o.isClosed { + return 0, o.prevErr + } + // Create a new request. + readReq := getRequest{ + isReadOp: true, + beenRead: o.beenRead, + Buffer: b, + } + + // Alert that this is the first request. + if !o.isStarted { + readReq.isFirstReq = true + } + + // Ask to establish a new data fetch routine based on seekData flag + readReq.DidOffsetChange = o.seekData + readReq.Offset = o.currOffset + + // Send and receive from the first request. + response, err := o.doGetRequest(readReq) + if err != nil && err != io.EOF { + // Save the error for future calls. + o.prevErr = err + return response.Size, err + } + + // Bytes read. + bytesRead := int64(response.Size) + + // Set the new offset. + oerr := o.setOffset(bytesRead) + if oerr != nil { + // Save the error for future calls. + o.prevErr = oerr + return response.Size, oerr + } + + // Return the response. + return response.Size, err +} + +// Stat returns the ObjectInfo structure describing Object. +func (o *Object) Stat() (ObjectInfo, error) { + if o == nil { + return ObjectInfo{}, ErrInvalidArgument("Object is nil") + } + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed { + return ObjectInfo{}, o.prevErr + } + + // This is the first request. + if !o.isStarted || !o.objectInfoSet { + // Send the request and get the response. + _, err := o.doGetRequest(getRequest{ + isFirstReq: !o.isStarted, + settingObjectInfo: !o.objectInfoSet, + }) + if err != nil { + o.prevErr = err + return ObjectInfo{}, err + } + } + + return o.objectInfo, nil +} + +// ReadAt reads len(b) bytes from the File starting at byte offset +// off. It returns the number of bytes read and the error, if any. +// ReadAt always returns a non-nil error when n < len(b). At end of +// file, that error is io.EOF. +func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) { + if o == nil { + return 0, ErrInvalidArgument("Object is nil") + } + + // Locking. 
+ o.mutex.Lock() + defer o.mutex.Unlock() + + // prevErr is error which was saved in previous operation. + if o.prevErr != nil || o.isClosed { + return 0, o.prevErr + } + + // Can only compare offsets to size when size has been set. + if o.objectInfoSet { + // If offset is negative than we return io.EOF. + // If offset is greater than or equal to object size we return io.EOF. + if (o.objectInfo.Size > -1 && offset >= o.objectInfo.Size) || offset < 0 { + return 0, io.EOF + } + } + + // Create the new readAt request. + readAtReq := getRequest{ + isReadOp: true, + isReadAt: true, + DidOffsetChange: true, // Offset always changes. + beenRead: o.beenRead, // Set if this is the first request to try and read. + Offset: offset, // Set the offset. + Buffer: b, + } + + // Alert that this is the first request. + if !o.isStarted { + readAtReq.isFirstReq = true + } + + // Send and receive from the first request. + response, err := o.doGetRequest(readAtReq) + if err != nil && err != io.EOF { + // Save the error. + o.prevErr = err + return response.Size, err + } + // Bytes read. + bytesRead := int64(response.Size) + // There is no valid objectInfo yet + // to compare against for EOF. + if !o.objectInfoSet { + // Update the currentOffset. + o.currOffset += bytesRead + } else { + // If this was not the first request update + // the offsets and compare against objectInfo + // for EOF. + oerr := o.setOffset(bytesRead) + if oerr != nil { + o.prevErr = oerr + return response.Size, oerr + } + } + return response.Size, err +} + +// Seek sets the offset for the next Read or Write to offset, +// interpreted according to whence: 0 means relative to the +// origin of the file, 1 means relative to the current offset, +// and 2 means relative to the end. +// Seek returns the new offset and an error, if any. +// +// Seeking to a negative offset is an error. Seeking to any positive +// offset is legal, subsequent io operations succeed until the +// underlying object is not closed. +func (o *Object) Seek(offset int64, whence int) (n int64, err error) { + if o == nil { + return 0, ErrInvalidArgument("Object is nil") + } + + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + if o.prevErr != nil { + // At EOF seeking is legal allow only io.EOF, for any other errors we return. + if o.prevErr != io.EOF { + return 0, o.prevErr + } + } + + // Negative offset is valid for whence of '2'. + if offset < 0 && whence != 2 { + return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d", whence)) + } + + // This is the first request. So before anything else + // get the ObjectInfo. + if !o.isStarted || !o.objectInfoSet { + // Create the new Seek request. + seekReq := getRequest{ + isReadOp: false, + Offset: offset, + isFirstReq: true, + } + // Send and receive from the seek request. + _, err := o.doGetRequest(seekReq) + if err != nil { + // Save the error. + o.prevErr = err + return 0, err + } + } + + // Switch through whence. 
+ switch whence { + default: + return 0, ErrInvalidArgument(fmt.Sprintf("Invalid whence %d", whence)) + case 0: + if o.objectInfo.Size > -1 && offset > o.objectInfo.Size { + return 0, io.EOF + } + o.currOffset = offset + case 1: + if o.objectInfo.Size > -1 && o.currOffset+offset > o.objectInfo.Size { + return 0, io.EOF + } + o.currOffset += offset + case 2: + // If we don't know the object size return an error for io.SeekEnd + if o.objectInfo.Size < 0 { + return 0, ErrInvalidArgument("Whence END is not supported when the object size is unknown") + } + // Seeking to positive offset is valid for whence '2', but + // since we are backing a Reader we have reached 'EOF' if + // offset is positive. + if offset > 0 { + return 0, io.EOF + } + // Seeking to negative position not allowed for whence. + if o.objectInfo.Size+offset < 0 { + return 0, ErrInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence)) + } + o.currOffset = o.objectInfo.Size + offset + } + // Reset the saved error since we successfully seeked, let the Read + // and ReadAt decide. + if o.prevErr == io.EOF { + o.prevErr = nil + } + + // Ask lower level to fetch again from source + o.seekData = true + + // Return the effective offset. + return o.currOffset, nil +} + +// Close - The behavior of Close after the first call returns error +// for subsequent Close() calls. +func (o *Object) Close() (err error) { + if o == nil { + return ErrInvalidArgument("Object is nil") + } + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + // if already closed return an error. + if o.isClosed { + return o.prevErr + } + + // Close successfully. + close(o.doneCh) + + // Save for future operations. + errMsg := "Object is already closed. Bad file descriptor." + o.prevErr = errors.New(errMsg) + // Save here that we closed done channel successfully. + o.isClosed = true + return nil +} + +// newObject instantiates a new *minio.Object* +// ObjectInfo will be set by setObjectInfo +func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<- struct{}) *Object { + return &Object{ + mutex: &sync.Mutex{}, + reqCh: reqCh, + resCh: resCh, + doneCh: doneCh, + } +} + +// getObject - retrieve object from Object Storage. +// +// Additionally this function also takes range arguments to download the specified +// range bytes of an object. Setting offset and length = 0 will download the full object. +// +// For more information about the HTTP Range header. +// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. +func (c Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, error) { + // Validate input arguments. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, ObjectInfo{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return nil, ObjectInfo{}, err + } + + // Execute GET on objectName. + resp, err := c.executeMethod(ctx, "GET", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + customHeader: opts.Header(), + contentSHA256Hex: emptySHA256Hex, + }) + if err != nil { + return nil, ObjectInfo{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { + return nil, ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + // Trim off the odd double quotes from ETag in the beginning and end. 
+ md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"") + md5sum = strings.TrimSuffix(md5sum, "\"") + + // Parse the date. + date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified")) + if err != nil { + msg := "Last-Modified time format not recognized. " + reportIssue + return nil, ObjectInfo{}, ErrorResponse{ + Code: "InternalError", + Message: msg, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + Region: resp.Header.Get("x-amz-bucket-region"), + } + } + + // Get content-type. + contentType := strings.TrimSpace(resp.Header.Get("Content-Type")) + if contentType == "" { + contentType = "application/octet-stream" + } + + objectStat := ObjectInfo{ + ETag: md5sum, + Key: objectName, + Size: resp.ContentLength, + LastModified: date, + ContentType: contentType, + // Extract only the relevant header keys describing the object. + // following function filters out a list of standard set of keys + // which are not part of object metadata. + Metadata: extractObjMetadata(resp.Header), + } + + // do not close body here, caller will close + return resp.Body, objectStat, nil +} diff --git a/vendor/github.com/minio/minio-go/api-get-options.go b/vendor/github.com/minio/minio-go/api-get-options.go new file mode 100644 index 0000000..dbf062d --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-get-options.go @@ -0,0 +1,128 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "fmt" + "net/http" + "time" + + "github.com/minio/minio-go/pkg/encrypt" +) + +// GetObjectOptions are used to specify additional headers or options +// during GET requests. +type GetObjectOptions struct { + headers map[string]string + ServerSideEncryption encrypt.ServerSide +} + +// StatObjectOptions are used to specify additional headers or options +// during GET info/stat requests. +type StatObjectOptions struct { + GetObjectOptions +} + +// Header returns the http.Header representation of the GET options. +func (o GetObjectOptions) Header() http.Header { + headers := make(http.Header, len(o.headers)) + for k, v := range o.headers { + headers.Set(k, v) + } + if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC { + o.ServerSideEncryption.Marshal(headers) + } + return headers +} + +// Set adds a key value pair to the options. The +// key-value pair will be part of the HTTP GET request +// headers. +func (o *GetObjectOptions) Set(key, value string) { + if o.headers == nil { + o.headers = make(map[string]string) + } + o.headers[http.CanonicalHeaderKey(key)] = value +} + +// SetMatchETag - set match etag. +func (o *GetObjectOptions) SetMatchETag(etag string) error { + if etag == "" { + return ErrInvalidArgument("ETag cannot be empty.") + } + o.Set("If-Match", "\""+etag+"\"") + return nil +} + +// SetMatchETagExcept - set match etag except. 
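A small sketch of the three Range forms accepted by `SetRange` (defined a little further down in this file), printing the header each form produces; `rangeForms` is illustrative:

```go
package example

import (
	"fmt"

	minio "github.com/minio/minio-go"
)

// rangeForms shows the three valid SetRange shapes. Each call overwrites
// any previous Range header, so separate option values are used.
func rangeForms() {
	var first, from, tail minio.GetObjectOptions
	_ = first.SetRange(0, 1023) // "bytes=0-1023": the first KiB
	_ = from.SetRange(1024, 0)  // "bytes=1024-": everything from offset 1024
	_ = tail.SetRange(0, -128)  // "bytes=-128": only the trailing 128 bytes
	fmt.Println(first.Header().Get("Range"))
	fmt.Println(from.Header().Get("Range"))
	fmt.Println(tail.Header().Get("Range"))
}
```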
+func (o *GetObjectOptions) SetMatchETagExcept(etag string) error {
+	if etag == "" {
+		return ErrInvalidArgument("ETag cannot be empty.")
+	}
+	o.Set("If-None-Match", "\""+etag+"\"")
+	return nil
+}
+
+// SetUnmodified - set unmodified time since.
+func (o *GetObjectOptions) SetUnmodified(modTime time.Time) error {
+	if modTime.IsZero() {
+		return ErrInvalidArgument("Unmodified since cannot be empty.")
+	}
+	o.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat))
+	return nil
+}
+
+// SetModified - set modified time since.
+func (o *GetObjectOptions) SetModified(modTime time.Time) error {
+	if modTime.IsZero() {
+		return ErrInvalidArgument("Modified since cannot be empty.")
+	}
+	o.Set("If-Modified-Since", modTime.Format(http.TimeFormat))
+	return nil
+}
+
+// SetRange - set the start and end offset of the object to be read.
+// See https://tools.ietf.org/html/rfc7233#section-3.1 for reference.
+func (o *GetObjectOptions) SetRange(start, end int64) error {
+	switch {
+	case start == 0 && end < 0:
+		// Read last '-end' bytes. `bytes=-N`.
+		o.Set("Range", fmt.Sprintf("bytes=%d", end))
+	case 0 < start && end == 0:
+		// Read everything starting from offset
+		// 'start'. `bytes=N-`.
+		o.Set("Range", fmt.Sprintf("bytes=%d-", start))
+	case 0 <= start && start <= end:
+		// Read everything starting at 'start' till the
+		// 'end'. `bytes=N-M`
+		o.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
+	default:
+		// All other cases such as
+		// bytes=-3-
+		// bytes=5-3
+		// bytes=-2-4
+		// bytes=-3-0
+		// bytes=-3--2
+		// are invalid.
+		return ErrInvalidArgument(
+			fmt.Sprintf(
+				"Invalid range specified: start=%d end=%d",
+				start, end))
+	}
+	return nil
+}
diff --git a/vendor/github.com/minio/minio-go/api-get-policy.go b/vendor/github.com/minio/minio-go/api-get-policy.go
new file mode 100644
index 0000000..12d4c59
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-get-policy.go
@@ -0,0 +1,78 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+
+	"github.com/minio/minio-go/pkg/s3utils"
+)
+
+// GetBucketPolicy - get bucket policy at a given path.
+func (c Client) GetBucketPolicy(bucketName string) (string, error) {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return "", err
+	}
+	bucketPolicy, err := c.getBucketPolicy(bucketName)
+	if err != nil {
+		errResponse := ToErrorResponse(err)
+		if errResponse.Code == "NoSuchBucketPolicy" {
+			return "", nil
+		}
+		return "", err
+	}
+	return bucketPolicy, nil
+}
+
+// Request server for current bucket policy.
+func (c Client) getBucketPolicy(bucketName string) (string, error) {
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("policy", "")
+
+	// Execute GET on bucket to fetch the policy sub-resource.
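+	// The request targets the bucket's "?policy" query parameter; the
+	// response body is the policy JSON document itself.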
+	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+		bucketName:       bucketName,
+		queryValues:      urlValues,
+		contentSHA256Hex: emptySHA256Hex,
+	})
+
+	defer closeResponse(resp)
+	if err != nil {
+		return "", err
+	}
+
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return "", httpRespToErrorResponse(resp, bucketName, "")
+		}
+	}
+
+	bucketPolicyBuf, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return "", err
+	}
+
+	policy := string(bucketPolicyBuf)
+	return policy, nil
+}
diff --git a/vendor/github.com/minio/minio-go/api-list.go b/vendor/github.com/minio/minio-go/api-list.go
new file mode 100644
index 0000000..2f1350a
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-list.go
@@ -0,0 +1,715 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/minio/minio-go/pkg/s3utils"
+)
+
+// ListBuckets list all buckets owned by this authenticated user.
+//
+// This call requires explicit authentication, no anonymous requests are
+// allowed for listing buckets.
+//
+//   api := client.New(....)
+//   buckets, err := api.ListBuckets()
+//   if err != nil {
+//       fmt.Println(err)
+//       return
+//   }
+//   for _, bucket := range buckets {
+//       fmt.Println(bucket)
+//   }
+//
+func (c Client) ListBuckets() ([]BucketInfo, error) {
+	// Execute GET on service.
+	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{contentSHA256Hex: emptySHA256Hex})
+	defer closeResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return nil, httpRespToErrorResponse(resp, "", "")
+		}
+	}
+	listAllMyBucketsResult := listAllMyBucketsResult{}
+	err = xmlDecoder(resp.Body, &listAllMyBucketsResult)
+	if err != nil {
+		return nil, err
+	}
+	return listAllMyBucketsResult.Buckets.Bucket, nil
+}
+
+/// Bucket Read Operations.
+
+// ListObjectsV2 lists all objects matching the objectPrefix from
+// the specified bucket. If recursion is enabled it would list
+// all subdirectories and all their contents.
+//
+// Your input parameters are just bucketName, objectPrefix, recursive
+// and a done channel for pro-actively closing the internal go
+// routine. If you enable recursive as 'true' this function will
+// return back all the objects in a given bucket name and object
+// prefix.
+//
+//   api := client.New(....)
+//   // Create a done channel.
+//   doneCh := make(chan struct{})
+//   defer close(doneCh)
+//   // Recursively list all objects in 'mytestbucket'
+//   recursive := true
+//   for message := range api.ListObjectsV2("mytestbucket", "starthere", recursive, doneCh) {
+//       fmt.Println(message)
+//   }
+//
+func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo {
+	// Allocate new list objects channel.
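+	// A buffer of one lets the listing goroutine stage a single result
+	// (or an error) before the caller starts receiving.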
+	objectStatCh := make(chan ObjectInfo, 1)
+	// Default listing is delimited at "/"
+	delimiter := "/"
+	if recursive {
+		// If recursive we do not delimit.
+		delimiter = ""
+	}
+
+	// Return object owner information by default
+	fetchOwner := true
+
+	// Validate bucket name.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		defer close(objectStatCh)
+		objectStatCh <- ObjectInfo{
+			Err: err,
+		}
+		return objectStatCh
+	}
+
+	// Validate incoming object prefix.
+	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
+		defer close(objectStatCh)
+		objectStatCh <- ObjectInfo{
+			Err: err,
+		}
+		return objectStatCh
+	}
+
+	// Initiate list objects goroutine here.
+	go func(objectStatCh chan<- ObjectInfo) {
+		defer close(objectStatCh)
+		// Save continuationToken for next request.
+		var continuationToken string
+		for {
+			// Get a list of objects, a maximum of 1000 per request.
+			result, err := c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, 1000, "")
+			if err != nil {
+				objectStatCh <- ObjectInfo{
+					Err: err,
+				}
+				return
+			}
+
+			// If contents are available loop through and send over channel.
+			for _, object := range result.Contents {
+				select {
+				// Send object content.
+				case objectStatCh <- object:
+				// If receives done from the caller, return here.
+				case <-doneCh:
+					return
+				}
+			}
+
+			// Send all common prefixes if any.
+			// NOTE: prefixes are only present if the request is delimited.
+			for _, obj := range result.CommonPrefixes {
+				select {
+				// Send object prefixes.
+				case objectStatCh <- ObjectInfo{
+					Key:  obj.Prefix,
+					Size: 0,
+				}:
+				// If receives done from the caller, return here.
+				case <-doneCh:
+					return
+				}
+			}
+
+			// If continuation token present, save it for next request.
+			if result.NextContinuationToken != "" {
+				continuationToken = result.NextContinuationToken
+			}
+
+			// Listing ends when the result is not truncated, return right here.
+			if !result.IsTruncated {
+				return
+			}
+		}
+	}(objectStatCh)
+	return objectStatCh
+}
+
+// listObjectsV2Query - (List Objects V2) - List some or all (up to 1000) of the objects in a bucket.
+//
+// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
+// request parameters :-
+// ---------
+// ?continuation-token - Used to continue iterating over a set of objects
+// ?delimiter - A delimiter is a character you use to group keys.
+// ?prefix - Limits the response to keys that begin with the specified prefix.
+// ?max-keys - Sets the maximum number of keys returned in the response body.
+// ?start-after - Specifies the key to start after when listing objects in a bucket.
+func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int, startAfter string) (ListBucketV2Result, error) {
+	// Validate bucket name.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return ListBucketV2Result{}, err
+	}
+	// Validate object prefix.
+	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
+		return ListBucketV2Result{}, err
+	}
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+
+	// Always set list-type in ListObjects V2
+	urlValues.Set("list-type", "2")
+
+	// Set object prefix, prefix value to be set to empty is okay.
+	urlValues.Set("prefix", objectPrefix)
+
+	// Set delimiter, delimiter value to be set to empty is okay.
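+	// e.g. delimiter "/" groups "photos/2017/a.jpg" and "photos/2017/b.jpg"
+	// under the common prefix "photos/", emulating directory listings.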
+ urlValues.Set("delimiter", delimiter) + + // Set continuation token + if continuationToken != "" { + urlValues.Set("continuation-token", continuationToken) + } + + // Fetch owner when listing + if fetchOwner { + urlValues.Set("fetch-owner", "true") + } + + // maxkeys should default to 1000 or less. + if maxkeys == 0 || maxkeys > 1000 { + maxkeys = 1000 + } + // Set max keys. + urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) + + // Set start-after + if startAfter != "" { + urlValues.Set("start-after", startAfter) + } + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return ListBucketV2Result{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ListBucketV2Result{}, httpRespToErrorResponse(resp, bucketName, "") + } + } + + // Decode listBuckets XML. + listBucketResult := ListBucketV2Result{} + if err = xmlDecoder(resp.Body, &listBucketResult); err != nil { + return listBucketResult, err + } + + // This is an additional verification check to make + // sure proper responses are received. + if listBucketResult.IsTruncated && listBucketResult.NextContinuationToken == "" { + return listBucketResult, errors.New("Truncated response should have continuation token set") + } + + // Success. + return listBucketResult, nil +} + +// ListObjects - (List Objects) - List some objects or all recursively. +// +// ListObjects lists all objects matching the objectPrefix from +// the specified bucket. If recursion is enabled it would list +// all subdirectories and all its contents. +// +// Your input parameters are just bucketName, objectPrefix, recursive +// and a done channel for pro-actively closing the internal go +// routine. If you enable recursive as 'true' this function will +// return back all the objects in a given bucket name and object +// prefix. +// +// api := client.New(....) +// // Create a done channel. +// doneCh := make(chan struct{}) +// defer close(doneCh) +// // Recurively list all objects in 'mytestbucket' +// recursive := true +// for message := range api.ListObjects("mytestbucket", "starthere", recursive, doneCh) { +// fmt.Println(message) +// } +// +func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo { + // Allocate new list objects channel. + objectStatCh := make(chan ObjectInfo, 1) + // Default listing is delimited at "/" + delimiter := "/" + if recursive { + // If recursive we do not delimit. + delimiter = "" + } + // Validate bucket name. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + defer close(objectStatCh) + objectStatCh <- ObjectInfo{ + Err: err, + } + return objectStatCh + } + // Validate incoming object prefix. + if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil { + defer close(objectStatCh) + objectStatCh <- ObjectInfo{ + Err: err, + } + return objectStatCh + } + + // Initiate list objects goroutine here. + go func(objectStatCh chan<- ObjectInfo) { + defer close(objectStatCh) + // Save marker for next request. + var marker string + for { + // Get list of objects a maximum of 1000 per request. + result, err := c.listObjectsQuery(bucketName, objectPrefix, marker, delimiter, 1000) + if err != nil { + objectStatCh <- ObjectInfo{ + Err: err, + } + return + } + + // If contents are available loop through and send over channel. 
+			for _, object := range result.Contents {
+				// Save the marker.
+				marker = object.Key
+				select {
+				// Send object content.
+				case objectStatCh <- object:
+				// If receives done from the caller, return here.
+				case <-doneCh:
+					return
+				}
+			}
+
+			// Send all common prefixes if any.
+			// NOTE: prefixes are only present if the request is delimited.
+			for _, obj := range result.CommonPrefixes {
+				object := ObjectInfo{}
+				object.Key = obj.Prefix
+				object.Size = 0
+				select {
+				// Send object prefixes.
+				case objectStatCh <- object:
+				// If receives done from the caller, return here.
+				case <-doneCh:
+					return
+				}
+			}
+
+			// If next marker present, save it for next request.
+			if result.NextMarker != "" {
+				marker = result.NextMarker
+			}
+
+			// Listing ends when the result is not truncated, return right here.
+			if !result.IsTruncated {
+				return
+			}
+		}
+	}(objectStatCh)
+	return objectStatCh
+}
+
+// listObjectsQuery - (List Objects) - List some or all (up to 1000) of the objects in a bucket.
+//
+// You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
+// request parameters :-
+// ---------
+// ?marker - Specifies the key to start with when listing objects in a bucket.
+// ?delimiter - A delimiter is a character you use to group keys.
+// ?prefix - Limits the response to keys that begin with the specified prefix.
+// ?max-keys - Sets the maximum number of keys returned in the response body.
+func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (ListBucketResult, error) {
+	// Validate bucket name.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return ListBucketResult{}, err
+	}
+	// Validate object prefix.
+	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
+		return ListBucketResult{}, err
+	}
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+
+	// Set object prefix, prefix value to be set to empty is okay.
+	urlValues.Set("prefix", objectPrefix)
+
+	// Set delimiter, delimiter value to be set to empty is okay.
+	urlValues.Set("delimiter", delimiter)
+
+	// Set object marker.
+	if objectMarker != "" {
+		urlValues.Set("marker", objectMarker)
+	}
+
+	// maxkeys should default to 1000 or less.
+	if maxkeys == 0 || maxkeys > 1000 {
+		maxkeys = 1000
+	}
+	// Set max keys.
+	urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
+
+	// Execute GET on bucket to list objects.
+	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+		bucketName:       bucketName,
+		queryValues:      urlValues,
+		contentSHA256Hex: emptySHA256Hex,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return ListBucketResult{}, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return ListBucketResult{}, httpRespToErrorResponse(resp, bucketName, "")
+		}
+	}
+	// Decode listBuckets XML.
+	listBucketResult := ListBucketResult{}
+	err = xmlDecoder(resp.Body, &listBucketResult)
+	if err != nil {
+		return listBucketResult, err
+	}
+	return listBucketResult, nil
+}
+
+// ListIncompleteUploads - List incompletely uploaded multipart objects.
+//
+// ListIncompleteUploads lists all incomplete uploads matching the
+// objectPrefix from the specified bucket. If recursion is enabled
+// it would list all subdirectories and all their contents.
+//
+// Your input parameters are just bucketName, objectPrefix, recursive
+// and a done channel to pro-actively close the internal go routine.
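+// Closing the done channel is the only way to stop the listing early.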
+// If you enable recursive as 'true' this function will return back all
+// the multipart objects in a given bucket name.
+//
+//   api := client.New(....)
+//   // Create a done channel.
+//   doneCh := make(chan struct{})
+//   defer close(doneCh)
+//   // Recursively list all incomplete uploads in 'mytestbucket'
+//   recursive := true
+//   for message := range api.ListIncompleteUploads("mytestbucket", "starthere", recursive, doneCh) {
+//       fmt.Println(message)
+//   }
+//
+func (c Client) ListIncompleteUploads(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo {
+	// Turn on size aggregation of individual parts.
+	isAggregateSize := true
+	return c.listIncompleteUploads(bucketName, objectPrefix, recursive, isAggregateSize, doneCh)
+}
+
+// listIncompleteUploads lists all incomplete uploads.
+func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive, aggregateSize bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo {
+	// Allocate channel for multipart uploads.
+	objectMultipartStatCh := make(chan ObjectMultipartInfo, 1)
+	// Delimiter is set to "/" by default.
+	delimiter := "/"
+	if recursive {
+		// If recursive do not delimit.
+		delimiter = ""
+	}
+	// Validate bucket name.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		defer close(objectMultipartStatCh)
+		objectMultipartStatCh <- ObjectMultipartInfo{
+			Err: err,
+		}
+		return objectMultipartStatCh
+	}
+	// Validate incoming object prefix.
+	if err := s3utils.CheckValidObjectNamePrefix(objectPrefix); err != nil {
+		defer close(objectMultipartStatCh)
+		objectMultipartStatCh <- ObjectMultipartInfo{
+			Err: err,
+		}
+		return objectMultipartStatCh
+	}
+	go func(objectMultipartStatCh chan<- ObjectMultipartInfo) {
+		defer close(objectMultipartStatCh)
+		// Object and upload ID marker for future requests.
+		var objectMarker string
+		var uploadIDMarker string
+		for {
+			// List all multipart uploads.
+			result, err := c.listMultipartUploadsQuery(bucketName, objectMarker, uploadIDMarker, objectPrefix, delimiter, 1000)
+			if err != nil {
+				objectMultipartStatCh <- ObjectMultipartInfo{
+					Err: err,
+				}
+				return
+			}
+			// Save objectMarker and uploadIDMarker for next request.
+			objectMarker = result.NextKeyMarker
+			uploadIDMarker = result.NextUploadIDMarker
+			// Send all multipart uploads.
+			for _, obj := range result.Uploads {
+				// Calculate total size of the uploaded parts if 'aggregateSize' is enabled.
+				if aggregateSize {
+					// Get total multipart size.
+					obj.Size, err = c.getTotalMultipartSize(bucketName, obj.Key, obj.UploadID)
+					if err != nil {
+						objectMultipartStatCh <- ObjectMultipartInfo{
+							Err: err,
+						}
+						continue
+					}
+				}
+				select {
+				// Send individual uploads here.
+				case objectMultipartStatCh <- obj:
+				// If done channel return here.
+				case <-doneCh:
+					return
+				}
+			}
+			// Send all common prefixes if any.
+			// NOTE: prefixes are only present if the request is delimited.
+			for _, obj := range result.CommonPrefixes {
+				object := ObjectMultipartInfo{}
+				object.Key = obj.Prefix
+				object.Size = 0
+				select {
+				// Send delimited prefixes here.
+				case objectMultipartStatCh <- object:
+				// If done channel return here.
+				case <-doneCh:
+					return
+				}
+			}
+			// Listing ends if result not truncated, return right here.
+			if !result.IsTruncated {
+				return
+			}
+		}
+	}(objectMultipartStatCh)
+	// Return.
+	return objectMultipartStatCh
+}
+
+// listMultipartUploadsQuery - (List Multipart Uploads).
+// - Lists some or all (up to 1000) in-progress multipart uploads in a bucket.
+//
+// You can use the request parameters as selection criteria to return a subset of the uploads in a bucket.
+// request parameters :-
+// ---------
+// ?key-marker - Specifies the multipart upload after which listing should begin.
+// ?upload-id-marker - Together with key-marker specifies the multipart upload after which listing should begin.
+// ?delimiter - A delimiter is a character you use to group keys.
+// ?prefix - Limits the response to keys that begin with the specified prefix.
+// ?max-uploads - Sets the maximum number of multipart uploads returned in the response body.
+func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) {
+	// Get resources properly escaped and lined up before using them in http request.
+	urlValues := make(url.Values)
+	// Set uploads.
+	urlValues.Set("uploads", "")
+	// Set object key marker.
+	if keyMarker != "" {
+		urlValues.Set("key-marker", keyMarker)
+	}
+	// Set upload id marker.
+	if uploadIDMarker != "" {
+		urlValues.Set("upload-id-marker", uploadIDMarker)
+	}
+
+	// Set object prefix, prefix value to be set to empty is okay.
+	urlValues.Set("prefix", prefix)
+
+	// Set delimiter, delimiter value to be set to empty is okay.
+	urlValues.Set("delimiter", delimiter)
+
+	// maxUploads should be 1000 or less.
+	if maxUploads == 0 || maxUploads > 1000 {
+		maxUploads = 1000
+	}
+	// Set max-uploads.
+	urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads))
+
+	// Execute GET on bucketName to list multipart uploads.
+	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+		bucketName:       bucketName,
+		queryValues:      urlValues,
+		contentSHA256Hex: emptySHA256Hex,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return ListMultipartUploadsResult{}, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return ListMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "")
+		}
+	}
+	// Decode response body.
+	listMultipartUploadsResult := ListMultipartUploadsResult{}
+	err = xmlDecoder(resp.Body, &listMultipartUploadsResult)
+	if err != nil {
+		return listMultipartUploadsResult, err
+	}
+	return listMultipartUploadsResult, nil
+}
+
+// listObjectParts lists all object parts, iterating over the paginated results.
+func (c Client) listObjectParts(bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) {
+	// Part number marker for the next batch of request.
+	var nextPartNumberMarker int
+	partsInfo = make(map[int]ObjectPart)
+	for {
+		// Get list of uploaded parts a maximum of 1000 per request.
+		listObjPartsResult, err := c.listObjectPartsQuery(bucketName, objectName, uploadID, nextPartNumberMarker, 1000)
+		if err != nil {
+			return nil, err
+		}
+		// Append to parts info.
+		for _, part := range listObjPartsResult.ObjectParts {
+			// Trim off the odd double quotes from ETag in the beginning and end.
+			part.ETag = strings.TrimPrefix(part.ETag, "\"")
+			part.ETag = strings.TrimSuffix(part.ETag, "\"")
+			partsInfo[part.PartNumber] = part
+		}
+		// Keep part number marker, for the next iteration.
+		nextPartNumberMarker = listObjPartsResult.NextPartNumberMarker
+		// Listing ends when the result is not truncated, return right here.
+		if !listObjPartsResult.IsTruncated {
+			break
+		}
+	}
+
+	// Return all the parts.
+	return partsInfo, nil
+}
+
+// findUploadIDs lists all incomplete uploads and finds the uploadIDs of the matching object name.
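+// Several upload IDs can exist for one object name when an upload was
+// initiated multiple times without being completed or aborted.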
+func (c Client) findUploadIDs(bucketName, objectName string) ([]string, error) {
+	var uploadIDs []string
+	// Make list incomplete uploads recursive.
+	isRecursive := true
+	// Turn off size aggregation of individual parts, in this request.
+	isAggregateSize := false
+	// Create done channel to cleanup the routine.
+	doneCh := make(chan struct{})
+	defer close(doneCh)
+	// List all incomplete uploads.
+	for mpUpload := range c.listIncompleteUploads(bucketName, objectName, isRecursive, isAggregateSize, doneCh) {
+		if mpUpload.Err != nil {
+			return nil, mpUpload.Err
+		}
+		if objectName == mpUpload.Key {
+			uploadIDs = append(uploadIDs, mpUpload.UploadID)
+		}
+	}
+	// Return all the matching upload ids.
+	return uploadIDs, nil
+}
+
+// getTotalMultipartSize - calculate total uploaded size for a given multipart object.
+func (c Client) getTotalMultipartSize(bucketName, objectName, uploadID string) (size int64, err error) {
+	// Iterate over all parts and aggregate the size.
+	partsInfo, err := c.listObjectParts(bucketName, objectName, uploadID)
+	if err != nil {
+		return 0, err
+	}
+	for _, partInfo := range partsInfo {
+		size += partInfo.Size
+	}
+	return size, nil
+}
+
+// listObjectPartsQuery (List Parts query)
+// - lists some or all (up to 1000) parts that have been uploaded
+// for a specific multipart upload
+//
+// You can use the request parameters as selection criteria to return
+// a subset of the uploads in a bucket, request parameters :-
+// ---------
+// ?part-number-marker - Specifies the part after which listing should
+// begin.
+// ?max-parts - Maximum parts to be listed per request.
+func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) {
+	// Get resources properly escaped and lined up before using them in http request.
+	urlValues := make(url.Values)
+	// Set part number marker.
+	urlValues.Set("part-number-marker", fmt.Sprintf("%d", partNumberMarker))
+	// Set upload id.
+	urlValues.Set("uploadId", uploadID)
+
+	// maxParts should be 1000 or less.
+	if maxParts == 0 || maxParts > 1000 {
+		maxParts = 1000
+	}
+	// Set max parts.
+	urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))
+
+	// Execute GET on objectName to get list of parts.
+	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+		bucketName:       bucketName,
+		objectName:       objectName,
+		queryValues:      urlValues,
+		contentSHA256Hex: emptySHA256Hex,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return ListObjectPartsResult{}, err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return ListObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName)
+		}
+	}
+	// Decode list object parts XML.
+	listObjectPartsResult := ListObjectPartsResult{}
+	err = xmlDecoder(resp.Body, &listObjectPartsResult)
+	if err != nil {
+		return listObjectPartsResult, err
+	}
+	return listObjectPartsResult, nil
+}
diff --git a/vendor/github.com/minio/minio-go/api-notification.go b/vendor/github.com/minio/minio-go/api-notification.go
new file mode 100644
index 0000000..1c01e36
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-notification.go
@@ -0,0 +1,228 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"bufio"
+	"context"
+	"encoding/json"
+	"io"
+	"net/http"
+	"net/url"
+	"time"
+
+	"github.com/minio/minio-go/pkg/s3utils"
+)
+
+// GetBucketNotification - get bucket notification at a given path.
+func (c Client) GetBucketNotification(bucketName string) (bucketNotification BucketNotification, err error) {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return BucketNotification{}, err
+	}
+	notification, err := c.getBucketNotification(bucketName)
+	if err != nil {
+		return BucketNotification{}, err
+	}
+	return notification, nil
+}
+
+// Request server for notification rules.
+func (c Client) getBucketNotification(bucketName string) (BucketNotification, error) {
+	urlValues := make(url.Values)
+	urlValues.Set("notification", "")
+
+	// Execute GET on bucket to fetch the notification configuration.
+	resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+		bucketName:       bucketName,
+		queryValues:      urlValues,
+		contentSHA256Hex: emptySHA256Hex,
+	})
+
+	defer closeResponse(resp)
+	if err != nil {
+		return BucketNotification{}, err
+	}
+	return processBucketNotificationResponse(bucketName, resp)
+}
+
+// processBucketNotificationResponse processes the GetBucketNotification http response from the server.
+func processBucketNotificationResponse(bucketName string, resp *http.Response) (BucketNotification, error) {
+	if resp.StatusCode != http.StatusOK {
+		errResponse := httpRespToErrorResponse(resp, bucketName, "")
+		return BucketNotification{}, errResponse
+	}
+	var bucketNotification BucketNotification
+	err := xmlDecoder(resp.Body, &bucketNotification)
+	if err != nil {
+		return BucketNotification{}, err
+	}
+	return bucketNotification, nil
+}
+
+// identity represents the user id, this is a compliance field.
+type identity struct {
+	PrincipalID string `json:"principalId"`
+}
+
+// Notification event bucket metadata.
+type bucketMeta struct {
+	Name          string   `json:"name"`
+	OwnerIdentity identity `json:"ownerIdentity"`
+	ARN           string   `json:"arn"`
+}
+
+// Notification event object metadata.
+type objectMeta struct {
+	Key       string `json:"key"`
+	Size      int64  `json:"size,omitempty"`
+	ETag      string `json:"eTag,omitempty"`
+	VersionID string `json:"versionId,omitempty"`
+	Sequencer string `json:"sequencer"`
+}
+
+// Notification event server specific metadata.
+type eventMeta struct {
+	SchemaVersion   string     `json:"s3SchemaVersion"`
+	ConfigurationID string     `json:"configurationId"`
+	Bucket          bucketMeta `json:"bucket"`
+	Object          objectMeta `json:"object"`
+}
+
+// sourceInfo represents information on the client that
+// triggered the event notification.
+type sourceInfo struct {
+	Host      string `json:"host"`
+	Port      string `json:"port"`
+	UserAgent string `json:"userAgent"`
+}
+
+// NotificationEvent represents an Amazon S3 bucket notification event.
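+// The layout follows the S3 event message structure; EventName carries
+// values such as "s3:ObjectCreated:Put".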
+type NotificationEvent struct {
+	EventVersion      string            `json:"eventVersion"`
+	EventSource       string            `json:"eventSource"`
+	AwsRegion         string            `json:"awsRegion"`
+	EventTime         string            `json:"eventTime"`
+	EventName         string            `json:"eventName"`
+	UserIdentity      identity          `json:"userIdentity"`
+	RequestParameters map[string]string `json:"requestParameters"`
+	ResponseElements  map[string]string `json:"responseElements"`
+	S3                eventMeta         `json:"s3"`
+	Source            sourceInfo        `json:"source"`
+}
+
+// NotificationInfo - represents the collection of notification events, additionally
+// also reports errors if any while listening on bucket notifications.
+type NotificationInfo struct {
+	Records []NotificationEvent
+	Err     error
+}
+
+// ListenBucketNotification - listen on bucket notifications.
+func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, events []string, doneCh <-chan struct{}) <-chan NotificationInfo {
+	notificationInfoCh := make(chan NotificationInfo, 1)
+	// On success, start a goroutine that reads the stream line by line.
+	go func(notificationInfoCh chan<- NotificationInfo) {
+		defer close(notificationInfoCh)
+
+		// Validate the bucket name.
+		if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+			notificationInfoCh <- NotificationInfo{
+				Err: err,
+			}
+			return
+		}
+
+		// Check the endpoint to verify that listening is supported;
+		// AWS S3 and Google Cloud Storage endpoints do not support it.
+		if s3utils.IsAmazonEndpoint(*c.endpointURL) || s3utils.IsGoogleEndpoint(*c.endpointURL) {
+			notificationInfoCh <- NotificationInfo{
+				Err: ErrAPINotSupported("Listening for bucket notification is specific only to `minio` server endpoints"),
+			}
+			return
+		}
+
+		// Continuously run and listen on bucket notification.
+		// Create a done channel to control the retry go routine.
+		retryDoneCh := make(chan struct{}, 1)
+
+		// Indicate to our routine to exit cleanly upon return.
+		defer close(retryDoneCh)
+
+		// Wait on the jitter retry loop.
+		for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) {
+			urlValues := make(url.Values)
+			urlValues.Set("prefix", prefix)
+			urlValues.Set("suffix", suffix)
+			urlValues["events"] = events
+
+			// Execute GET on bucket to listen on its notifications.
+			resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+				bucketName:       bucketName,
+				queryValues:      urlValues,
+				contentSHA256Hex: emptySHA256Hex,
+			})
+			if err != nil {
+				notificationInfoCh <- NotificationInfo{
+					Err: err,
+				}
+				return
+			}
+
+			// Validate http response, upon error return quickly.
+			if resp.StatusCode != http.StatusOK {
+				errResponse := httpRespToErrorResponse(resp, bucketName, "")
+				notificationInfoCh <- NotificationInfo{
+					Err: errResponse,
+				}
+				return
+			}
+
+			// Initialize a new bufio scanner, to read line by line.
+			bio := bufio.NewScanner(resp.Body)
+
+			// Close the response body.
+			defer resp.Body.Close()
+
+			// Unmarshal each line, returns marshalled values.
+			for bio.Scan() {
+				var notificationInfo NotificationInfo
+				if err = json.Unmarshal(bio.Bytes(), &notificationInfo); err != nil {
+					continue
+				}
+				// Send notificationInfo
+				select {
+				case notificationInfoCh <- notificationInfo:
+				case <-doneCh:
+					return
+				}
+			}
+			// Look for any underlying errors.
+			if err = bio.Err(); err != nil {
+				// For an unexpected connection drop from server, we close the body
+				// and re-connect.
+				if err == io.ErrUnexpectedEOF {
+					resp.Body.Close()
+				}
+			}
+		}
+	}(notificationInfoCh)
+
+	// Return the notification info channel, for the caller to start reading from.
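+	// Typical consumption: range over the returned channel and inspect
+	// each NotificationInfo's Records, closing doneCh to stop listening.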
+	return notificationInfoCh
+}
diff --git a/vendor/github.com/minio/minio-go/api-presigned.go b/vendor/github.com/minio/minio-go/api-presigned.go
new file mode 100644
index 0000000..a2c0607
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-presigned.go
@@ -0,0 +1,215 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"errors"
+	"net/http"
+	"net/url"
+	"time"
+
+	"github.com/minio/minio-go/pkg/s3signer"
+	"github.com/minio/minio-go/pkg/s3utils"
+)
+
+// presignURL - Returns a presigned URL for an input 'method'.
+// Expires maximum is 7 days (i.e. 604800 seconds), minimum is 1 second.
+func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+	// Input validation.
+	if method == "" {
+		return nil, ErrInvalidArgument("method cannot be empty.")
+	}
+	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+		return nil, err
+	}
+	if err = isValidExpiry(expires); err != nil {
+		return nil, err
+	}
+
+	// Convert expires into seconds.
+	expireSeconds := int64(expires / time.Second)
+	reqMetadata := requestMetadata{
+		presignURL:  true,
+		bucketName:  bucketName,
+		objectName:  objectName,
+		expires:     expireSeconds,
+		queryValues: reqParams,
+	}
+
+	// Instantiate a new request.
+	// Since expires is set newRequest will presign the request.
+	var req *http.Request
+	if req, err = c.newRequest(method, reqMetadata); err != nil {
+		return nil, err
+	}
+	return req.URL, nil
+}
+
+// PresignedGetObject - Returns a presigned URL to access an object's
+// data without credentials. The URL can have a maximum expiry of
+// up to 7 days or a minimum of 1 second. Additionally you can override
+// a set of response headers using the query parameters.
+func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+	if err = s3utils.CheckValidObjectName(objectName); err != nil {
+		return nil, err
+	}
+	return c.presignURL("GET", bucketName, objectName, expires, reqParams)
+}
+
+// PresignedHeadObject - Returns a presigned URL to access object
+// metadata without credentials. The URL can have a maximum expiry of
+// up to 7 days or a minimum of 1 second. Additionally you can override
+// a set of response headers using the query parameters.
+func (c Client) PresignedHeadObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+	if err = s3utils.CheckValidObjectName(objectName); err != nil {
+		return nil, err
+	}
+	return c.presignURL("HEAD", bucketName, objectName, expires, reqParams)
+}
+
+// PresignedPutObject - Returns a presigned URL to upload an object
+// without credentials. The URL can have a maximum expiry of up to
+// 7 days or a minimum of 1 second.
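+// For example, a URL valid for a single day can be generated by passing
+// expires as 24 * time.Hour.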
+func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) {
+	if err = s3utils.CheckValidObjectName(objectName); err != nil {
+		return nil, err
+	}
+	return c.presignURL("PUT", bucketName, objectName, expires, nil)
+}
+
+// Presign - returns a presigned URL for any http method of your choice
+// along with custom request params. The URL can have a maximum expiry of
+// up to 7 days or a minimum of 1 second.
+func (c Client) Presign(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+	return c.presignURL(method, bucketName, objectName, expires, reqParams)
+}
+
+// PresignedPostPolicy - Returns POST urlString, form data to upload an object.
+func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[string]string, err error) {
+	// Validate input arguments.
+	if p.expiration.IsZero() {
+		return nil, nil, errors.New("Expiration time must be specified")
+	}
+	if _, ok := p.formData["key"]; !ok {
+		return nil, nil, errors.New("object key must be specified")
+	}
+	if _, ok := p.formData["bucket"]; !ok {
+		return nil, nil, errors.New("bucket name must be specified")
+	}
+
+	bucketName := p.formData["bucket"]
+	// Fetch the bucket location.
+	location, err := c.getBucketLocation(bucketName)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, bucketName)
+
+	u, err = c.makeTargetURL(bucketName, "", location, isVirtualHost, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Get credentials from the configured credentials provider.
+	credValues, err := c.credsProvider.Get()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var (
+		signerType      = credValues.SignerType
+		sessionToken    = credValues.SessionToken
+		accessKeyID     = credValues.AccessKeyID
+		secretAccessKey = credValues.SecretAccessKey
+	)
+
+	if signerType.IsAnonymous() {
+		return nil, nil, ErrInvalidArgument("Presigned operations are not supported for anonymous credentials")
+	}
+
+	// Keep time.
+	t := time.Now().UTC()
+	// Handle signature version '2' here.
+	if signerType.IsV2() {
+		policyBase64 := p.base64()
+		p.formData["policy"] = policyBase64
+		// For Google endpoint set this value to be 'GoogleAccessId'.
+		if s3utils.IsGoogleEndpoint(*c.endpointURL) {
+			p.formData["GoogleAccessId"] = accessKeyID
+		} else {
+			// For all other endpoints set this value to be 'AWSAccessKeyId'.
+			p.formData["AWSAccessKeyId"] = accessKeyID
+		}
+		// Sign the policy.
+		p.formData["signature"] = s3signer.PostPresignSignatureV2(policyBase64, secretAccessKey)
+		return u, p.formData, nil
+	}
+
+	// Add date policy.
+	if err = p.addNewPolicy(policyCondition{
+		matchType: "eq",
+		condition: "$x-amz-date",
+		value:     t.Format(iso8601DateFormat),
+	}); err != nil {
+		return nil, nil, err
+	}
+
+	// Add algorithm policy.
+	if err = p.addNewPolicy(policyCondition{
+		matchType: "eq",
+		condition: "$x-amz-algorithm",
+		value:     signV4Algorithm,
+	}); err != nil {
+		return nil, nil, err
+	}
+
+	// Add a credential policy.
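+	// The credential scopes the policy to the access key, signing date and
+	// the bucket's region, as required by AWS signature V4.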
+	credential := s3signer.GetCredential(accessKeyID, location, t)
+	if err = p.addNewPolicy(policyCondition{
+		matchType: "eq",
+		condition: "$x-amz-credential",
+		value:     credential,
+	}); err != nil {
+		return nil, nil, err
+	}
+
+	if sessionToken != "" {
+		if err = p.addNewPolicy(policyCondition{
+			matchType: "eq",
+			condition: "$x-amz-security-token",
+			value:     sessionToken,
+		}); err != nil {
+			return nil, nil, err
+		}
+	}
+
+	// Get base64 encoded policy.
+	policyBase64 := p.base64()
+
+	// Fill in the form data.
+	p.formData["policy"] = policyBase64
+	p.formData["x-amz-algorithm"] = signV4Algorithm
+	p.formData["x-amz-credential"] = credential
+	p.formData["x-amz-date"] = t.Format(iso8601DateFormat)
+	if sessionToken != "" {
+		p.formData["x-amz-security-token"] = sessionToken
+	}
+	p.formData["x-amz-signature"] = s3signer.PostPresignSignatureV4(policyBase64, t, secretAccessKey, location)
+	return u, p.formData, nil
+}
diff --git a/vendor/github.com/minio/minio-go/api-put-bucket.go b/vendor/github.com/minio/minio-go/api-put-bucket.go
new file mode 100644
index 0000000..33dc0cf
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-bucket.go
@@ -0,0 +1,306 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"bytes"
+	"context"
+	"encoding/xml"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/minio/minio-go/pkg/s3utils"
+)
+
+/// Bucket operations
+
+// MakeBucket creates a new bucket with bucketName.
+//
+// Location is an optional argument, by default all buckets are
+// created in US Standard Region.
+//
+// For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html
+// For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations
+func (c Client) MakeBucket(bucketName string, location string) (err error) {
+	defer func() {
+		// Save the location into cache on a successful makeBucket response.
+		if err == nil {
+			c.bucketLocCache.Set(bucketName, location)
+		}
+	}()
+
+	// Validate the input arguments.
+	if err := s3utils.CheckValidBucketNameStrict(bucketName); err != nil {
+		return err
+	}
+
+	// If location is empty, treat it as the default region 'us-east-1'.
+	if location == "" {
+		location = "us-east-1"
+		// For clients configured with a custom region,
+		// default to that region instead of 'us-east-1'.
+		if c.region != "" {
+			location = c.region
+		}
+	}
+	// PUT bucket request metadata.
+	reqMetadata := requestMetadata{
+		bucketName:     bucketName,
+		bucketLocation: location,
+	}
+
+	// If location is not 'us-east-1' create bucket location config.
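+	// 'us-east-1' is the implicit default, so the CreateBucketConfiguration
+	// body is omitted for it and sent for every other region.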
+ if location != "us-east-1" && location != "" { + createBucketConfig := createBucketConfiguration{} + createBucketConfig.Location = location + var createBucketConfigBytes []byte + createBucketConfigBytes, err = xml.Marshal(createBucketConfig) + if err != nil { + return err + } + reqMetadata.contentMD5Base64 = sumMD5Base64(createBucketConfigBytes) + reqMetadata.contentSHA256Hex = sum256Hex(createBucketConfigBytes) + reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes) + reqMetadata.contentLength = int64(len(createBucketConfigBytes)) + } + + // Execute PUT to create a new bucket. + resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + + // Success. + return nil +} + +// SetBucketPolicy set the access permissions on an existing bucket. +func (c Client) SetBucketPolicy(bucketName, policy string) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // If policy is empty then delete the bucket policy. + if policy == "" { + return c.removeBucketPolicy(bucketName) + } + + // Save the updated policies. + return c.putBucketPolicy(bucketName, policy) +} + +// Saves a new bucket policy. +func (c Client) putBucketPolicy(bucketName, policy string) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("policy", "") + + // Content-length is mandatory for put policy request + policyReader := strings.NewReader(policy) + b, err := ioutil.ReadAll(policyReader) + if err != nil { + return err + } + + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: policyReader, + contentLength: int64(len(b)), + } + + // Execute PUT to upload a new bucket policy. + resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// Removes all policies on a bucket. +func (c Client) removeBucketPolicy(bucketName string) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("policy", "") + + // Execute DELETE on objectName. + resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + return nil +} + +// SetBucketLifecycle set the lifecycle on an existing bucket. +func (c Client) SetBucketLifecycle(bucketName, lifecycle string) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // If lifecycle is empty then delete it. + if lifecycle == "" { + return c.removeBucketLifecycle(bucketName) + } + + // Save the updated lifecycle. + return c.putBucketLifecycle(bucketName, lifecycle) +} + +// Saves a new bucket lifecycle. 
+func (c Client) putBucketLifecycle(bucketName, lifecycle string) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("lifecycle", "")
+
+	// Content-length is mandatory for put lifecycle request
+	lifecycleReader := strings.NewReader(lifecycle)
+	b, err := ioutil.ReadAll(lifecycleReader)
+	if err != nil {
+		return err
+	}
+
+	reqMetadata := requestMetadata{
+		bucketName:       bucketName,
+		queryValues:      urlValues,
+		contentBody:      lifecycleReader,
+		contentLength:    int64(len(b)),
+		contentMD5Base64: sumMD5Base64(b),
+	}
+
+	// Execute PUT to upload a new bucket lifecycle.
+	resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return httpRespToErrorResponse(resp, bucketName, "")
+		}
+	}
+	return nil
+}
+
+// Remove lifecycle from a bucket.
+func (c Client) removeBucketLifecycle(bucketName string) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("lifecycle", "")
+
+	// Execute DELETE on bucket to remove the lifecycle.
+	resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
+		bucketName:       bucketName,
+		queryValues:      urlValues,
+		contentSHA256Hex: emptySHA256Hex,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// SetBucketNotification saves a new bucket notification.
+func (c Client) SetBucketNotification(bucketName string, bucketNotification BucketNotification) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+
+	// Get resources properly escaped and lined up before
+	// using them in http request.
+	urlValues := make(url.Values)
+	urlValues.Set("notification", "")
+
+	notifBytes, err := xml.Marshal(bucketNotification)
+	if err != nil {
+		return err
+	}
+
+	notifBuffer := bytes.NewReader(notifBytes)
+	reqMetadata := requestMetadata{
+		bucketName:       bucketName,
+		queryValues:      urlValues,
+		contentBody:      notifBuffer,
+		contentLength:    int64(len(notifBytes)),
+		contentMD5Base64: sumMD5Base64(notifBytes),
+		contentSHA256Hex: sum256Hex(notifBytes),
+	}
+
+	// Execute PUT to upload a new bucket notification.
+	resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusOK {
+			return httpRespToErrorResponse(resp, bucketName, "")
+		}
+	}
+	return nil
+}
+
+// RemoveAllBucketNotification - Removes all previously specified bucket notification configuration.
+func (c Client) RemoveAllBucketNotification(bucketName string) error {
+	return c.SetBucketNotification(bucketName, BucketNotification{})
+}
diff --git a/vendor/github.com/minio/minio-go/api-put-object-common.go b/vendor/github.com/minio/minio-go/api-put-object-common.go
new file mode 100644
index 0000000..c16c3c6
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-object-common.go
@@ -0,0 +1,111 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"io"
+	"math"
+	"os"
+
+	"github.com/minio/minio-go/pkg/s3utils"
+)
+
+// Verify if reader is *minio.Object
+func isObject(reader io.Reader) (ok bool) {
+	_, ok = reader.(*Object)
+	return
+}
+
+// Verify if reader is a generic ReaderAt
+func isReadAt(reader io.Reader) (ok bool) {
+	_, ok = reader.(io.ReaderAt)
+	if ok {
+		var v *os.File
+		v, ok = reader.(*os.File)
+		if ok {
+			// Stdin, Stdout and Stderr all have *os.File type
+			// which happen to also be io.ReaderAt compatible;
+			// we need to add special conditions for them to
+			// be ignored by this function.
+			for _, f := range []string{
+				"/dev/stdin",
+				"/dev/stdout",
+				"/dev/stderr",
+			} {
+				if f == v.Name() {
+					ok = false
+					break
+				}
+			}
+		}
+	}
+	return
+}
+
+// optimalPartInfo - calculate the optimal part info for a given
+// object size.
+//
+// NOTE: Assumption here is that for any object to be uploaded to any S3 compatible
+// object storage it will have the following parameters as constants.
+//
+//  maxPartsCount - 10000
+//  minPartSize - 64MiB
+//  maxMultipartPutObjectSize - 5TiB
+//
+func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) {
+	// If object size is '-1', set it to the 5TiB maximum.
+	if objectSize == -1 {
+		objectSize = maxMultipartPutObjectSize
+	}
+	// Error out if object size is larger than the supported maximum.
+	if objectSize > maxMultipartPutObjectSize {
+		err = ErrEntityTooLarge(objectSize, maxMultipartPutObjectSize, "", "")
+		return
+	}
+	// Use floats for part size for all calculations to avoid
+	// overflows during float64 to int64 conversions. Note that the
+	// division itself must also happen in float64: dividing the two
+	// integers first would truncate and make the Ceil a no-op.
+	partSizeFlt := math.Ceil(float64(objectSize) / float64(maxPartsCount))
+	partSizeFlt = math.Ceil(partSizeFlt/minPartSize) * minPartSize
+	// Total parts count.
+	totalPartsCount = int(math.Ceil(float64(objectSize) / partSizeFlt))
+	// Part size.
+	partSize = int64(partSizeFlt)
+	// Last part size.
+	lastPartSize = objectSize - int64(totalPartsCount-1)*partSize
+	return totalPartsCount, partSize, lastPartSize, nil
+}
+
+// newUploadID - initiate a new multipart upload and fetch a new upload id.
+func (c Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return "", err
+	}
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return "", err
+	}
+
+	// Initiate multipart upload for an object.
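+	// This issues POST /<object>?uploads; the returned UploadID keys every
+	// subsequent UploadPart and CompleteMultipartUpload call.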
+	initMultipartUploadResult, err := c.initiateMultipartUpload(ctx, bucketName, objectName, opts)
+	if err != nil {
+		return "", err
+	}
+	return initMultipartUploadResult.UploadID, nil
+}
diff --git a/vendor/github.com/minio/minio-go/api-put-object-context.go b/vendor/github.com/minio/minio-go/api-put-object-context.go
new file mode 100644
index 0000000..ff4663e
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-object-context.go
@@ -0,0 +1,33 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"io"
+)
+
+// PutObjectWithContext - Identical to PutObject call, but accepts context to facilitate request cancellation.
+func (c Client) PutObjectWithContext(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64,
+	opts PutObjectOptions) (n int64, err error) {
+	err = opts.validate()
+	if err != nil {
+		return 0, err
+	}
+	return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts)
+}
diff --git a/vendor/github.com/minio/minio-go/api-put-object-copy.go b/vendor/github.com/minio/minio-go/api-put-object-copy.go
new file mode 100644
index 0000000..21322ef
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-put-object-copy.go
@@ -0,0 +1,83 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017, 2018 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"io"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/minio/minio-go/pkg/encrypt"
+)
+
+// CopyObject - copy a source object into a new object
+func (c Client) CopyObject(dst DestinationInfo, src SourceInfo) error {
+	return c.CopyObjectWithProgress(dst, src, nil)
+}
+
+// CopyObjectWithProgress - copy a source object into a new object, optionally takes
+// progress bar input to notify current progress.
+func (c Client) CopyObjectWithProgress(dst DestinationInfo, src SourceInfo, progress io.Reader) error {
+	header := make(http.Header)
+	for k, v := range src.Headers {
+		header[k] = v
+	}
+
+	var err error
+	var size int64
+	// If a progress bar is specified, the object size is needed as well,
+	// so initiate a StatObject request here.
+ if progress != nil { + size, _, _, err = src.getProps(c) + if err != nil { + return err + } + } + + if src.encryption != nil { + encrypt.SSECopy(src.encryption).Marshal(header) + } + + if dst.encryption != nil { + dst.encryption.Marshal(header) + } + for k, v := range dst.getUserMetaHeadersMap(true) { + header.Set(k, v) + } + + resp, err := c.executeMethod(context.Background(), "PUT", requestMetadata{ + bucketName: dst.bucket, + objectName: dst.object, + customHeader: header, + }) + if err != nil { + return err + } + defer closeResponse(resp) + + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, dst.bucket, dst.object) + } + + // Update the progress properly after successful copy. + if progress != nil { + io.CopyN(ioutil.Discard, progress, size) + } + + return nil +} diff --git a/vendor/github.com/minio/minio-go/api-put-object-file-context.go b/vendor/github.com/minio/minio-go/api-put-object-file-context.go new file mode 100644 index 0000000..140a9c0 --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-put-object-file-context.go @@ -0,0 +1,64 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "mime" + "os" + "path/filepath" + + "github.com/minio/minio-go/pkg/s3utils" +) + +// FPutObjectWithContext - Create an object in a bucket, with contents from file at filePath. Allows request cancellation. +func (c Client) FPutObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (n int64, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return 0, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return 0, err + } + + // Open the referenced file. + fileReader, err := os.Open(filePath) + // If any error fail quickly here. + if err != nil { + return 0, err + } + defer fileReader.Close() + + // Save the file stat. + fileStat, err := fileReader.Stat() + if err != nil { + return 0, err + } + + // Save the file size. + fileSize := fileStat.Size() + + // Set contentType based on filepath extension if not given or default + // value of "application/octet-stream" if the extension has no associated type. + if opts.ContentType == "" { + if opts.ContentType = mime.TypeByExtension(filepath.Ext(filePath)); opts.ContentType == "" { + opts.ContentType = "application/octet-stream" + } + } + return c.PutObjectWithContext(ctx, bucketName, objectName, fileReader, fileSize, opts) +} diff --git a/vendor/github.com/minio/minio-go/api-put-object-file.go b/vendor/github.com/minio/minio-go/api-put-object-file.go new file mode 100644 index 0000000..7c8e051 --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-put-object-file.go @@ -0,0 +1,27 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" +) + +// FPutObject - Create an object in a bucket, with contents from file at filePath +func (c Client) FPutObject(bucketName, objectName, filePath string, opts PutObjectOptions) (n int64, err error) { + return c.FPutObjectWithContext(context.Background(), bucketName, objectName, filePath, opts) +} diff --git a/vendor/github.com/minio/minio-go/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/api-put-object-multipart.go new file mode 100644 index 0000000..db92520 --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-put-object-multipart.go @@ -0,0 +1,372 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/hex" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "runtime/debug" + "sort" + "strconv" + "strings" + + "github.com/minio/minio-go/pkg/encrypt" + "github.com/minio/minio-go/pkg/s3utils" +) + +func (c Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, + opts PutObjectOptions) (n int64, err error) { + n, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts) + if err != nil { + errResp := ToErrorResponse(err) + // Verify if multipart functionality is not available, if not + // fall back to single PutObject operation. + if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { + // Verify if size of reader is greater than '5GiB'. + if size > maxSinglePutObjectSize { + return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) + } + // Fall back to uploading as single PutObject operation. + return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) + } + } + return n, err +} + +func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (n int64, err error) { + // Input validation. + if err = s3utils.CheckValidBucketName(bucketName); err != nil { + return 0, err + } + if err = s3utils.CheckValidObjectName(objectName); err != nil { + return 0, err + } + + // Total data read and written to server. should be equal to + // 'size' at the end of the call. + var totalUploadedSize int64 + + // Complete multipart upload. 
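+	// (Illustrative aside, not upstream code: the AccessDenied fallback in
+	// putObjectMultipart above is observable from the caller's side as well,
+	// e.g.
+	//
+	//	if resp := ToErrorResponse(err); resp.Code == "AccessDenied" {
+	//		log.Println("multipart upload rejected:", resp.Message)
+	//	}
+	//
+	// using the exported ToErrorResponse helper from this package.)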
+ var complMultipartUpload completeMultipartUpload + + // Calculate the optimal parts info for a given size. + totalPartsCount, partSize, _, err := optimalPartInfo(-1) + if err != nil { + return 0, err + } + + // Initiate a new multipart upload. + uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) + if err != nil { + return 0, err + } + + defer func() { + if err != nil { + c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) + } + }() + + // Part number always starts with '1'. + partNumber := 1 + + // Initialize parts uploaded map. + partsInfo := make(map[int]ObjectPart) + + // Create a buffer. + buf := make([]byte, partSize) + defer debug.FreeOSMemory() + + for partNumber <= totalPartsCount { + // Choose hash algorithms to be calculated by hashCopyN, + // avoid sha256 with non-v4 signature request or + // HTTPS connection. + hashAlgos, hashSums := c.hashMaterials() + + length, rErr := io.ReadFull(reader, buf) + if rErr == io.EOF { + break + } + if rErr != nil && rErr != io.ErrUnexpectedEOF { + return 0, rErr + } + + // Calculates hash sums while copying partSize bytes into cw. + for k, v := range hashAlgos { + v.Write(buf[:length]) + hashSums[k] = v.Sum(nil) + } + + // Update progress reader appropriately to the latest offset + // as we read from the source. + rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) + + // Checksums.. + var ( + md5Base64 string + sha256Hex string + ) + if hashSums["md5"] != nil { + md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"]) + } + if hashSums["sha256"] != nil { + sha256Hex = hex.EncodeToString(hashSums["sha256"]) + } + + // Proceed to upload the part. + var objPart ObjectPart + objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, + md5Base64, sha256Hex, int64(length), opts.ServerSideEncryption) + if err != nil { + return totalUploadedSize, err + } + + // Save successfully uploaded part metadata. + partsInfo[partNumber] = objPart + + // Save successfully uploaded size. + totalUploadedSize += int64(length) + + // Increment part number. + partNumber++ + + // For unknown size, Read EOF we break away. + // We do not have to upload till totalPartsCount. + if rErr == io.EOF { + break + } + } + + // Loop over total uploaded parts to save them in + // Parts array before completing the multipart request. + for i := 1; i < partNumber; i++ { + part, ok := partsInfo[i] + if !ok { + return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i)) + } + complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + }) + } + + // Sort all completed parts. + sort.Sort(completedParts(complMultipartUpload.Parts)) + if _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload); err != nil { + return totalUploadedSize, err + } + + // Return final size. + return totalUploadedSize, nil +} + +// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID. +func (c Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return initiateMultipartUploadResult{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return initiateMultipartUploadResult{}, err + } + + // Initialize url queries. 
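+	// (Illustrative aside, not upstream code: on the wire this is the standard
+	// S3 initiate call, a POST with the bare "uploads" query, e.g.
+	//
+	//	POST /my-bucket/backup.tar?uploads HTTP/1.1
+	//
+	// whose XML response body is decoded below into
+	// initiateMultipartUploadResult.)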
+ urlValues := make(url.Values) + urlValues.Set("uploads", "") + + // Set ContentType header. + customHeader := opts.Header() + + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + customHeader: customHeader, + } + + // Execute POST on an objectName to initiate multipart upload. + resp, err := c.executeMethod(ctx, "POST", reqMetadata) + defer closeResponse(resp) + if err != nil { + return initiateMultipartUploadResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + // Decode xml for new multipart upload. + initiateMultipartUploadResult := initiateMultipartUploadResult{} + err = xmlDecoder(resp.Body, &initiateMultipartUploadResult) + if err != nil { + return initiateMultipartUploadResult, err + } + return initiateMultipartUploadResult, nil +} + +// uploadPart - Uploads a part in a multipart upload. +func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader, + partNumber int, md5Base64, sha256Hex string, size int64, sse encrypt.ServerSide) (ObjectPart, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ObjectPart{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return ObjectPart{}, err + } + if size > maxPartSize { + return ObjectPart{}, ErrEntityTooLarge(size, maxPartSize, bucketName, objectName) + } + if size <= -1 { + return ObjectPart{}, ErrEntityTooSmall(size, bucketName, objectName) + } + if partNumber <= 0 { + return ObjectPart{}, ErrInvalidArgument("Part number cannot be negative or equal to zero.") + } + if uploadID == "" { + return ObjectPart{}, ErrInvalidArgument("UploadID cannot be empty.") + } + + // Get resources properly escaped and lined up before using them in http request. + urlValues := make(url.Values) + // Set part number. + urlValues.Set("partNumber", strconv.Itoa(partNumber)) + // Set upload id. + urlValues.Set("uploadId", uploadID) + + // Set encryption headers, if any. + customHeader := make(http.Header) + // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html + // Server-side encryption is supported by the S3 Multipart Upload actions. + // Unless you are using a customer-provided encryption key, you don't need + // to specify the encryption parameters in each UploadPart request. + if sse != nil && sse.Type() == encrypt.SSEC { + sse.Marshal(customHeader) + } + + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + customHeader: customHeader, + contentBody: reader, + contentLength: size, + contentMD5Base64: md5Base64, + contentSHA256Hex: sha256Hex, + } + + // Execute PUT on each part. + resp, err := c.executeMethod(ctx, "PUT", reqMetadata) + defer closeResponse(resp) + if err != nil { + return ObjectPart{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ObjectPart{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + // Once successfully uploaded, return completed part. + objPart := ObjectPart{} + objPart.Size = size + objPart.PartNumber = partNumber + // Trim off the odd double quotes from ETag in the beginning and end. 
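+	// (Illustrative aside, not upstream code: for the single pair of quotes
+	// S3 sends, the two calls below behave like the one-liner
+	//
+	//	objPart.ETag = strings.Trim(resp.Header.Get("ETag"), "\"")
+	//
+	// the longer form is kept to match the upstream source.)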
+ objPart.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"") + objPart.ETag = strings.TrimSuffix(objPart.ETag, "\"") + return objPart, nil +} + +// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts. +func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string, + complete completeMultipartUpload) (completeMultipartUploadResult, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return completeMultipartUploadResult{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return completeMultipartUploadResult{}, err + } + + // Initialize url queries. + urlValues := make(url.Values) + urlValues.Set("uploadId", uploadID) + // Marshal complete multipart body. + completeMultipartUploadBytes, err := xml.Marshal(complete) + if err != nil { + return completeMultipartUploadResult{}, err + } + + // Instantiate all the complete multipart buffer. + completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes) + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentBody: completeMultipartUploadBuffer, + contentLength: int64(len(completeMultipartUploadBytes)), + contentSHA256Hex: sum256Hex(completeMultipartUploadBytes), + } + + // Execute POST to complete multipart upload for an objectName. + resp, err := c.executeMethod(ctx, "POST", reqMetadata) + defer closeResponse(resp) + if err != nil { + return completeMultipartUploadResult{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return completeMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + // Read resp.Body into a []bytes to parse for Error response inside the body + var b []byte + b, err = ioutil.ReadAll(resp.Body) + if err != nil { + return completeMultipartUploadResult{}, err + } + // Decode completed multipart upload response on success. + completeMultipartUploadResult := completeMultipartUploadResult{} + err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult) + if err != nil { + // xml parsing failure due to presence an ill-formed xml fragment + return completeMultipartUploadResult, err + } else if completeMultipartUploadResult.Bucket == "" { + // xml's Decode method ignores well-formed xml that don't apply to the type of value supplied. + // In this case, it would leave completeMultipartUploadResult with the corresponding zero-values + // of the members. + + // Decode completed multipart upload response on failure + completeMultipartUploadErr := ErrorResponse{} + err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr) + if err != nil { + // xml parsing failure due to presence an ill-formed xml fragment + return completeMultipartUploadResult, err + } + return completeMultipartUploadResult, completeMultipartUploadErr + } + return completeMultipartUploadResult, nil +} diff --git a/vendor/github.com/minio/minio-go/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/api-put-object-streaming.go new file mode 100644 index 0000000..211d1c2 --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-put-object-streaming.go @@ -0,0 +1,417 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"sort"
+	"strings"
+
+	"github.com/minio/minio-go/pkg/s3utils"
+)
+
+// putObjectMultipartStream - upload a large object using
+// multipart upload and streaming signature for signing payload.
+// Comprehensive put object operation involving multipart uploads.
+//
+// The following code handles these types of readers:
+//
+//  - *minio.Object
+//  - Any reader which has a method 'ReadAt()'
+//
+func (c Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string,
+	reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) {
+
+	if !isObject(reader) && isReadAt(reader) {
+		// The reader implements ReadAt and is not a *minio.Object, so use the parallel uploader.
+		n, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts)
+	} else {
+		n, err = c.putObjectMultipartStreamNoChecksum(ctx, bucketName, objectName, reader, size, opts)
+	}
+	if err != nil {
+		errResp := ToErrorResponse(err)
+		// Verify if multipart functionality is not available, if not
+		// fall back to single PutObject operation.
+		if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
+			// Verify if size of reader is greater than '5GiB'.
+			if size > maxSinglePutObjectSize {
+				return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
+			}
+			// Fall back to uploading as single PutObject operation.
+			return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
+		}
+	}
+	return n, err
+}
+
+// uploadedPartRes - the response received from a part upload.
+type uploadedPartRes struct {
+	Error   error // Any error encountered while uploading the part.
+	PartNum int   // Number of the part uploaded.
+	Size    int64 // Size of the part uploaded.
+	Part    *ObjectPart
+}
+
+type uploadPartReq struct {
+	PartNum int         // Number of the part to upload.
+	Part    *ObjectPart // Part metadata, filled in once the upload succeeds.
+}
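+
+// (Illustrative aside, not upstream code: callers never pick the upload path
+// themselves; isObject/isReadAt in putObjectMultipartStream decide it, e.g.
+//
+//	f, _ := os.Open("backup.tar") // *os.File: ReadAt path, parallel workers
+//	n, err := client.PutObject("my-bucket", "backup.tar", f, size, opts)
+//
+//	r, w := io.Pipe() // pipes have no ReadAt: sequential streaming path
+//
+// where client is an assumed *minio.Client.)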
+
+// putObjectMultipartStreamFromReadAt - Uploads files bigger than 64MiB.
+// Supports all readers which implement the io.ReaderAt interface
+// (ReadAt method).
+//
+// NOTE: This function is meant to be used for all readers which
+// implement io.ReaderAt, which allows us to resume multipart
+// uploads by reading at an offset, avoiding a re-read of data that
+// was already uploaded. Internally this function uses temporary files
+// for staging all the data; these temporary files are cleaned up
+// automatically when the caller (i.e. the http client) closes the
+// stream after uploading all the contents successfully.
+func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string,
+	reader io.ReaderAt, size int64, opts PutObjectOptions) (n int64, err error) {
+	// Input validation.
+	if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+		return 0, err
+	}
+	if err = s3utils.CheckValidObjectName(objectName); err != nil {
+		return 0, err
+	}
+
+	// Calculate the optimal parts info for a given size.
+	totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
+	if err != nil {
+		return 0, err
+	}
+
+	// Initiate a new multipart upload.
+	uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
+	if err != nil {
+		return 0, err
+	}
+
+	// Aborts the multipart upload in progress, if the
+	// function returns any error. Since we do not resume,
+	// we should purge the parts which have been uploaded
+	// to relinquish storage space.
+	defer func() {
+		if err != nil {
+			c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
+		}
+	}()
+
+	// Total data read and written to server; should be equal to 'size' at the end of the call.
+	var totalUploadedSize int64
+
+	// Complete multipart upload.
+	var complMultipartUpload completeMultipartUpload
+
+	// Declare a channel that sends the next part number to be uploaded.
+	// Buffered to 10000 because that's the maximum number of parts allowed
+	// by S3.
+	uploadPartsCh := make(chan uploadPartReq, 10000)
+
+	// Declare a channel that sends back the response of a part upload.
+	// Buffered to 10000 because that's the maximum number of parts allowed
+	// by S3.
+	uploadedPartsCh := make(chan uploadedPartRes, 10000)
+
+	// Used for readability, lastPartNumber is always totalPartsCount.
+	lastPartNumber := totalPartsCount
+
+	// Send each part number to the channel to be processed.
+	for p := 1; p <= totalPartsCount; p++ {
+		uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil}
+	}
+	close(uploadPartsCh)
+	// Start getNumThreads() workers; each receives part numbers from the
+	// channel and uploads them in parallel.
+	for w := 1; w <= opts.getNumThreads(); w++ {
+		go func(partSize int64) {
+			// Each worker will draw from the part channel and upload in parallel.
+			for uploadReq := range uploadPartsCh {
+
+				// Calculate this part's read offset as a multiple of
+				// partSize.
+				readOffset := int64(uploadReq.PartNum-1) * partSize
+
+				// As a special case if partNumber is lastPartNumber, we
+				// calculate the offset based on the last part size.
+				if uploadReq.PartNum == lastPartNumber {
+					readOffset = (size - lastPartSize)
+					partSize = lastPartSize
+				}
+
+				// Get a section reader on a particular offset.
+				sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress)
+
+				// Proceed to upload the part. err is a fresh local variable
+				// here, so concurrent workers do not race on the function's
+				// named return value.
+				objPart, err := c.uploadPart(ctx, bucketName, objectName, uploadID,
+					sectionReader, uploadReq.PartNum,
+					"", "", partSize, opts.ServerSideEncryption)
+				if err != nil {
+					uploadedPartsCh <- uploadedPartRes{
+						Size:  0,
+						Error: err,
+					}
+					// Exit the goroutine.
+					return
+				}
+
+				// Save successfully uploaded part metadata.
+				uploadReq.Part = &objPart
+
+				// Send successful part info through the channel.
+				uploadedPartsCh <- uploadedPartRes{
+					Size:    objPart.Size,
+					PartNum: uploadReq.PartNum,
+					Part:    uploadReq.Part,
+					Error:   nil,
+				}
+			}
+		}(partSize)
+	}
+
+	// Gather the responses as they occur and update any
+	// progress bar.
+	for u := 1; u <= totalPartsCount; u++ {
+		uploadRes := <-uploadedPartsCh
+		if uploadRes.Error != nil {
+			return totalUploadedSize, uploadRes.Error
+		}
+		// Retrieve each uploaded part and store it to be completed.
+		part := uploadRes.Part
+		if part == nil {
+			return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum))
+		}
+		// Update the totalUploadedSize.
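+		// (Illustrative aside, not upstream code: the worker count above comes
+		// from PutObjectOptions.NumThreads via getNumThreads(), so e.g.
+		//
+		//	opts := PutObjectOptions{NumThreads: 8}
+		//
+		// uploads up to eight parts concurrently; when unset, the library's
+		// totalWorkers default applies.)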
+ totalUploadedSize += uploadRes.Size + // Store the parts to be completed in order. + complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + }) + } + + // Verify if we uploaded all the data. + if totalUploadedSize != size { + return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) + } + + // Sort all completed parts. + sort.Sort(completedParts(complMultipartUpload.Parts)) + _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload) + if err != nil { + return totalUploadedSize, err + } + + // Return final size. + return totalUploadedSize, nil +} + +func (c Client) putObjectMultipartStreamNoChecksum(ctx context.Context, bucketName, objectName string, + reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { + // Input validation. + if err = s3utils.CheckValidBucketName(bucketName); err != nil { + return 0, err + } + if err = s3utils.CheckValidObjectName(objectName); err != nil { + return 0, err + } + + // Calculate the optimal parts info for a given size. + totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size) + if err != nil { + return 0, err + } + // Initiates a new multipart request + uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) + if err != nil { + return 0, err + } + + // Aborts the multipart upload if the function returns + // any error, since we do not resume we should purge + // the parts which have been uploaded to relinquish + // storage space. + defer func() { + if err != nil { + c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) + } + }() + + // Total data read and written to server. should be equal to 'size' at the end of the call. + var totalUploadedSize int64 + + // Initialize parts uploaded map. + partsInfo := make(map[int]ObjectPart) + + // Part number always starts with '1'. + var partNumber int + for partNumber = 1; partNumber <= totalPartsCount; partNumber++ { + // Update progress reader appropriately to the latest offset + // as we read from the source. + hookReader := newHook(reader, opts.Progress) + + // Proceed to upload the part. + if partNumber == totalPartsCount { + partSize = lastPartSize + } + var objPart ObjectPart + objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, + io.LimitReader(hookReader, partSize), + partNumber, "", "", partSize, opts.ServerSideEncryption) + if err != nil { + return totalUploadedSize, err + } + + // Save successfully uploaded part metadata. + partsInfo[partNumber] = objPart + + // Save successfully uploaded size. + totalUploadedSize += partSize + } + + // Verify if we uploaded all the data. + if size > 0 { + if totalUploadedSize != size { + return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) + } + } + + // Complete multipart upload. + var complMultipartUpload completeMultipartUpload + + // Loop over total uploaded parts to save them in + // Parts array before completing the multipart request. + for i := 1; i < partNumber; i++ { + part, ok := partsInfo[i] + if !ok { + return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i)) + } + complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + }) + } + + // Sort all completed parts. 
+ sort.Sort(completedParts(complMultipartUpload.Parts)) + _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload) + if err != nil { + return totalUploadedSize, err + } + + // Return final size. + return totalUploadedSize, nil +} + +// putObjectNoChecksum special function used Google Cloud Storage. This special function +// is used for Google Cloud Storage since Google's multipart API is not S3 compatible. +func (c Client) putObjectNoChecksum(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return 0, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return 0, err + } + + // Size -1 is only supported on Google Cloud Storage, we error + // out in all other situations. + if size < 0 && !s3utils.IsGoogleEndpoint(*c.endpointURL) { + return 0, ErrEntityTooSmall(size, bucketName, objectName) + } + if size > 0 { + if isReadAt(reader) && !isObject(reader) { + seeker, _ := reader.(io.Seeker) + offset, err := seeker.Seek(0, io.SeekCurrent) + if err != nil { + return 0, ErrInvalidArgument(err.Error()) + } + reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size) + } + } + + // Update progress reader appropriately to the latest offset as we + // read from the source. + readSeeker := newHook(reader, opts.Progress) + + // This function does not calculate sha256 and md5sum for payload. + // Execute put object. + st, err := c.putObjectDo(ctx, bucketName, objectName, readSeeker, "", "", size, opts) + if err != nil { + return 0, err + } + if st.Size != size { + return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName) + } + return size, nil +} + +// putObjectDo - executes the put object http operation. +// NOTE: You must have WRITE permissions on a bucket to add an object to it. +func (c Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (ObjectInfo, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ObjectInfo{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return ObjectInfo{}, err + } + // Set headers. + customHeader := opts.Header() + + // Populate request metadata. + reqMetadata := requestMetadata{ + bucketName: bucketName, + objectName: objectName, + customHeader: customHeader, + contentBody: reader, + contentLength: size, + contentMD5Base64: md5Base64, + contentSHA256Hex: sha256Hex, + } + + // Execute PUT an objectName. + resp, err := c.executeMethod(ctx, "PUT", reqMetadata) + defer closeResponse(resp) + if err != nil { + return ObjectInfo{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + var objInfo ObjectInfo + // Trim off the odd double quotes from ETag in the beginning and end. + objInfo.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"") + objInfo.ETag = strings.TrimSuffix(objInfo.ETag, "\"") + // A success here means data was written to server successfully. + objInfo.Size = size + + // Return here. 
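+	// (Illustrative aside, not upstream code: Size here is taken from the
+	// request, not from the response; a caller who wants server-confirmed
+	// metadata can follow up with a stat call, e.g.
+	//
+	//	info, err := client.StatObject(bucketName, objectName, StatObjectOptions{})
+	//
+	// assuming the StatObject API provided elsewhere in this library.)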
+ return objInfo, nil +} diff --git a/vendor/github.com/minio/minio-go/api-put-object.go b/vendor/github.com/minio/minio-go/api-put-object.go new file mode 100644 index 0000000..0330cd9 --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-put-object.go @@ -0,0 +1,267 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "runtime/debug" + "sort" + + "github.com/minio/minio-go/pkg/encrypt" + "github.com/minio/minio-go/pkg/s3utils" + "golang.org/x/net/http/httpguts" +) + +// PutObjectOptions represents options specified by user for PutObject call +type PutObjectOptions struct { + UserMetadata map[string]string + Progress io.Reader + ContentType string + ContentEncoding string + ContentDisposition string + ContentLanguage string + CacheControl string + ServerSideEncryption encrypt.ServerSide + NumThreads uint + StorageClass string + WebsiteRedirectLocation string +} + +// getNumThreads - gets the number of threads to be used in the multipart +// put object operation +func (opts PutObjectOptions) getNumThreads() (numThreads int) { + if opts.NumThreads > 0 { + numThreads = int(opts.NumThreads) + } else { + numThreads = totalWorkers + } + return +} + +// Header - constructs the headers from metadata entered by user in +// PutObjectOptions struct +func (opts PutObjectOptions) Header() (header http.Header) { + header = make(http.Header) + + if opts.ContentType != "" { + header["Content-Type"] = []string{opts.ContentType} + } else { + header["Content-Type"] = []string{"application/octet-stream"} + } + if opts.ContentEncoding != "" { + header["Content-Encoding"] = []string{opts.ContentEncoding} + } + if opts.ContentDisposition != "" { + header["Content-Disposition"] = []string{opts.ContentDisposition} + } + if opts.ContentLanguage != "" { + header["Content-Language"] = []string{opts.ContentLanguage} + } + if opts.CacheControl != "" { + header["Cache-Control"] = []string{opts.CacheControl} + } + if opts.ServerSideEncryption != nil { + opts.ServerSideEncryption.Marshal(header) + } + if opts.StorageClass != "" { + header[amzStorageClass] = []string{opts.StorageClass} + } + if opts.WebsiteRedirectLocation != "" { + header[amzWebsiteRedirectLocation] = []string{opts.WebsiteRedirectLocation} + } + for k, v := range opts.UserMetadata { + if !isAmzHeader(k) && !isStandardHeader(k) && !isStorageClassHeader(k) { + header["X-Amz-Meta-"+k] = []string{v} + } else { + header[k] = []string{v} + } + } + return +} + +// validate() checks if the UserMetadata map has standard headers or and raises an error if so. 
+func (opts PutObjectOptions) validate() (err error) { + for k, v := range opts.UserMetadata { + if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) { + return ErrInvalidArgument(k + " unsupported user defined metadata name") + } + if !httpguts.ValidHeaderFieldValue(v) { + return ErrInvalidArgument(v + " unsupported user defined metadata value") + } + } + return nil +} + +// completedParts is a collection of parts sortable by their part numbers. +// used for sorting the uploaded parts before completing the multipart request. +type completedParts []CompletePart + +func (a completedParts) Len() int { return len(a) } +func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber } + +// PutObject creates an object in a bucket. +// +// You must have WRITE permissions on a bucket to create an object. +// +// - For size smaller than 64MiB PutObject automatically does a +// single atomic Put operation. +// - For size larger than 64MiB PutObject automatically does a +// multipart Put operation. +// - For size input as -1 PutObject does a multipart Put operation +// until input stream reaches EOF. Maximum object size that can +// be uploaded through this operation will be 5TiB. +func (c Client) PutObject(bucketName, objectName string, reader io.Reader, objectSize int64, + opts PutObjectOptions) (n int64, err error) { + return c.PutObjectWithContext(context.Background(), bucketName, objectName, reader, objectSize, opts) +} + +func (c Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) { + // Check for largest object size allowed. + if size > int64(maxMultipartPutObjectSize) { + return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName) + } + + // NOTE: Streaming signature is not supported by GCS. + if s3utils.IsGoogleEndpoint(*c.endpointURL) { + // Do not compute MD5 for Google Cloud Storage. + return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) + } + + if c.overrideSignerType.IsV2() { + if size >= 0 && size < minPartSize { + return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) + } + return c.putObjectMultipart(ctx, bucketName, objectName, reader, size, opts) + } + if size < 0 { + return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts) + } + + if size < minPartSize { + return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts) + } + // For all sizes greater than 64MiB do multipart. + return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts) +} + +func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (n int64, err error) { + // Input validation. + if err = s3utils.CheckValidBucketName(bucketName); err != nil { + return 0, err + } + if err = s3utils.CheckValidObjectName(objectName); err != nil { + return 0, err + } + + // Total data read and written to server. should be equal to + // 'size' at the end of the call. + var totalUploadedSize int64 + + // Complete multipart upload. + var complMultipartUpload completeMultipartUpload + + // Calculate the optimal parts info for a given size. + totalPartsCount, partSize, _, err := optimalPartInfo(-1) + if err != nil { + return 0, err + } + // Initiate a new multipart upload. 
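+	// (Illustrative aside, not upstream code: putObjectCommon above routes by
+	// size roughly as follows:
+	//
+	//	size == -1          -> this function (buffered parts, unknown length)
+	//	size <  minPartSize -> putObjectNoChecksum (one plain PUT)
+	//	size >= minPartSize -> putObjectMultipartStream
+	//
+	// with Google endpoints and V2 signatures special-cased before that.)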
+ uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) + if err != nil { + return 0, err + } + + defer func() { + if err != nil { + c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) + } + }() + + // Part number always starts with '1'. + partNumber := 1 + + // Initialize parts uploaded map. + partsInfo := make(map[int]ObjectPart) + + // Create a buffer. + buf := make([]byte, partSize) + defer debug.FreeOSMemory() + + for partNumber <= totalPartsCount { + length, rErr := io.ReadFull(reader, buf) + if rErr == io.EOF && partNumber > 1 { + break + } + if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF { + return 0, rErr + } + // Update progress reader appropriately to the latest offset + // as we read from the source. + rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) + + // Proceed to upload the part. + var objPart ObjectPart + objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber, + "", "", int64(length), opts.ServerSideEncryption) + if err != nil { + return totalUploadedSize, err + } + + // Save successfully uploaded part metadata. + partsInfo[partNumber] = objPart + + // Save successfully uploaded size. + totalUploadedSize += int64(length) + + // Increment part number. + partNumber++ + + // For unknown size, Read EOF we break away. + // We do not have to upload till totalPartsCount. + if rErr == io.EOF { + break + } + } + + // Loop over total uploaded parts to save them in + // Parts array before completing the multipart request. + for i := 1; i < partNumber; i++ { + part, ok := partsInfo[i] + if !ok { + return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i)) + } + complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + }) + } + + // Sort all completed parts. + sort.Sort(completedParts(complMultipartUpload.Parts)) + if _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload); err != nil { + return totalUploadedSize, err + } + + // Return final size. + return totalUploadedSize, nil +} diff --git a/vendor/github.com/minio/minio-go/api-remove.go b/vendor/github.com/minio/minio-go/api-remove.go new file mode 100644 index 0000000..f33df4d --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-remove.go @@ -0,0 +1,303 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/xml" + "io" + "net/http" + "net/url" + + "github.com/minio/minio-go/pkg/s3utils" +) + +// RemoveBucket deletes the bucket name. +// +// All objects (including all object versions and delete markers). +// in the bucket must be deleted before successfully attempting this request. +func (c Client) RemoveBucket(bucketName string) error { + // Input validation. 
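+	// (Illustrative aside, not upstream code: the bucket must already be
+	// empty, e.g. drain RemoveObjects, defined later in this file, and then
+	//
+	//	err := client.RemoveBucket("scratch-bucket")
+	//
+	// a non-empty bucket makes the server reply with a BucketNotEmpty error.)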
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+	// Execute DELETE on bucket.
+	resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
+		bucketName:       bucketName,
+		contentSHA256Hex: emptySHA256Hex,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	if resp != nil {
+		if resp.StatusCode != http.StatusNoContent {
+			return httpRespToErrorResponse(resp, bucketName, "")
+		}
+	}
+
+	// Remove the location from cache on a successful delete.
+	c.bucketLocCache.Delete(bucketName)
+
+	return nil
+}
+
+// RemoveObject removes an object from a bucket.
+func (c Client) RemoveObject(bucketName, objectName string) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return err
+	}
+	// Execute DELETE on objectName.
+	resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
+		bucketName:       bucketName,
+		objectName:       objectName,
+		contentSHA256Hex: emptySHA256Hex,
+	})
+	defer closeResponse(resp)
+	if err != nil {
+		return err
+	}
+	if resp != nil {
+		// If some unexpected error happened and max retry is reached, we want to let the client know.
+		if resp.StatusCode != http.StatusNoContent {
+			return httpRespToErrorResponse(resp, bucketName, objectName)
+		}
+	}
+
+	// DeleteObject always responds with http '204' even for
+	// objects which do not exist. So no need to handle them
+	// specifically.
+	return nil
+}
+
+// RemoveObjectError - container of Multi Delete S3 API error
+type RemoveObjectError struct {
+	ObjectName string
+	Err        error
+}
+
+// generateRemoveMultiObjectsRequest - generate the XML request body for a multi-object delete request
+func generateRemoveMultiObjectsRequest(objects []string) []byte {
+	rmObjects := []deleteObject{}
+	for _, obj := range objects {
+		rmObjects = append(rmObjects, deleteObject{Key: obj})
+	}
+	xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: rmObjects, Quiet: true})
+	return xmlBytes
+}
+
+// processRemoveMultiObjectsResponse - parse the multi-object delete response
+// and send a failure result for each object that could not be removed
+func processRemoveMultiObjectsResponse(body io.Reader, objects []string, errorCh chan<- RemoveObjectError) {
+	// Parse multi delete XML response
+	rmResult := &deleteMultiObjectsResult{}
+	err := xmlDecoder(body, rmResult)
+	if err != nil {
+		errorCh <- RemoveObjectError{ObjectName: "", Err: err}
+		return
+	}
+
+	// Report deletions that returned an error.
+	for _, obj := range rmResult.UnDeletedObjects {
+		errorCh <- RemoveObjectError{
+			ObjectName: obj.Key,
+			Err: ErrorResponse{
+				Code:    obj.Code,
+				Message: obj.Message,
+			},
+		}
+	}
+}
+
+// RemoveObjectsWithContext - Identical to the RemoveObjects call, but accepts a context to facilitate request cancellation.
+func (c Client) RemoveObjectsWithContext(ctx context.Context, bucketName string, objectsCh <-chan string) <-chan RemoveObjectError {
+	errorCh := make(chan RemoveObjectError, 1)
+
+	// Validate if bucket name is valid.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		defer close(errorCh)
+		errorCh <- RemoveObjectError{
+			Err: err,
+		}
+		return errorCh
+	}
+	// Validate objects channel to be properly allocated.
+	if objectsCh == nil {
+		defer close(errorCh)
+		errorCh <- RemoveObjectError{
+			Err: ErrInvalidArgument("Objects channel cannot be nil"),
+		}
+		return errorCh
+	}
+
+	// Generate and call MultiDelete S3 requests based on entries received from objectsCh
+	go func(errorCh chan<- RemoveObjectError) {
+		maxEntries := 1000
+		finish := false
+		urlValues := make(url.Values)
+		urlValues.Set("delete", "")
+
+		// Close error channel when Multi delete finishes.
+		defer close(errorCh)
+
+		// Loop over entries by 1000 and call MultiDelete requests
+		for {
+			if finish {
+				break
+			}
+			count := 0
+			var batch []string
+
+			// Try to gather 1000 entries
+			for object := range objectsCh {
+				batch = append(batch, object)
+				if count++; count >= maxEntries {
+					break
+				}
+			}
+			if count == 0 {
+				// Multi Objects Delete API doesn't accept an empty object list, quit immediately
+				break
+			}
+			if count < maxEntries {
+				// We didn't have 1000 entries, so this is the last batch
+				finish = true
+			}
+
+			// Generate remove multi objects XML request
+			removeBytes := generateRemoveMultiObjectsRequest(batch)
+			// Execute POST on bucket to delete the current batch of objects.
+			resp, err := c.executeMethod(ctx, "POST", requestMetadata{
+				bucketName:       bucketName,
+				queryValues:      urlValues,
+				contentBody:      bytes.NewReader(removeBytes),
+				contentLength:    int64(len(removeBytes)),
+				contentMD5Base64: sumMD5Base64(removeBytes),
+				contentSHA256Hex: sum256Hex(removeBytes),
+			})
+			if resp != nil {
+				if resp.StatusCode != http.StatusOK {
+					e := httpRespToErrorResponse(resp, bucketName, "")
+					errorCh <- RemoveObjectError{ObjectName: "", Err: e}
+				}
+			}
+			if err != nil {
+				for _, b := range batch {
+					errorCh <- RemoveObjectError{ObjectName: b, Err: err}
+				}
+				continue
+			}
+
+			// Process multiobjects remove xml response
+			processRemoveMultiObjectsResponse(resp.Body, batch, errorCh)
+
+			closeResponse(resp)
+		}
+	}(errorCh)
+	return errorCh
+}
+
+// RemoveObjects removes multiple objects from a bucket.
+// The list of objects to remove is received from objectsCh.
+// Remove failures are sent back via the error channel.
+func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan RemoveObjectError {
+	return c.RemoveObjectsWithContext(context.Background(), bucketName, objectsCh)
+}
+
+// RemoveIncompleteUpload aborts a partially uploaded object.
+func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return err
+	}
+	// Find multipart upload ids of the object to be aborted.
+	uploadIDs, err := c.findUploadIDs(bucketName, objectName)
+	if err != nil {
+		return err
+	}
+
+	for _, uploadID := range uploadIDs {
+		// Abort the incomplete multipart upload, based on the upload id passed.
+		err := c.abortMultipartUpload(context.Background(), bucketName, objectName, uploadID)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// abortMultipartUpload aborts a multipart upload for the given
+// uploadID; all previously uploaded parts are deleted.
+func (c Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error {
+	// Input validation.
+	if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+		return err
+	}
+	if err := s3utils.CheckValidObjectName(objectName); err != nil {
+		return err
+	}
+
+	// Initialize url queries.
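+	// (Illustrative aside, not upstream code: RemoveObjects above is driven by
+	// a caller-owned channel, e.g.
+	//
+	//	objectsCh := make(chan string)
+	//	go func() {
+	//		defer close(objectsCh)
+	//		objectsCh <- "logs/a.gz"
+	//		objectsCh <- "logs/b.gz"
+	//	}()
+	//	for e := range client.RemoveObjects("my-bucket", objectsCh) {
+	//		log.Println("remove failed:", e.ObjectName, e.Err)
+	//	}
+	//
+	// the returned channel closes once all batches have been processed.)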
+ urlValues := make(url.Values) + urlValues.Set("uploadId", uploadID) + + // Execute DELETE on multipart upload. + resp, err := c.executeMethod(ctx, "DELETE", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusNoContent { + // Abort has no response body, handle it for any errors. + var errorResponse ErrorResponse + switch resp.StatusCode { + case http.StatusNotFound: + // This is needed specifically for abort and it cannot + // be converged into default case. + errorResponse = ErrorResponse{ + Code: "NoSuchUpload", + Message: "The specified multipart upload does not exist.", + BucketName: bucketName, + Key: objectName, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + Region: resp.Header.Get("x-amz-bucket-region"), + } + default: + return httpRespToErrorResponse(resp, bucketName, objectName) + } + return errorResponse + } + } + return nil +} diff --git a/vendor/github.com/minio/minio-go/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/api-s3-datatypes.go new file mode 100644 index 0000000..8d8880c --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-s3-datatypes.go @@ -0,0 +1,245 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "encoding/xml" + "time" +) + +// listAllMyBucketsResult container for listBuckets response. +type listAllMyBucketsResult struct { + // Container for one or more buckets. + Buckets struct { + Bucket []BucketInfo + } + Owner owner +} + +// owner container for bucket owner information. +type owner struct { + DisplayName string + ID string +} + +// CommonPrefix container for prefix response. +type CommonPrefix struct { + Prefix string +} + +// ListBucketV2Result container for listObjects response version 2. +type ListBucketV2Result struct { + // A response can contain CommonPrefixes only if you have + // specified a delimiter. + CommonPrefixes []CommonPrefix + // Metadata about each object returned. + Contents []ObjectInfo + Delimiter string + + // Encoding type used to encode object keys in the response. + EncodingType string + + // A flag that indicates whether or not ListObjects returned all of the results + // that satisfied the search criteria. + IsTruncated bool + MaxKeys int64 + Name string + + // Hold the token that will be sent in the next request to fetch the next group of keys + NextContinuationToken string + + ContinuationToken string + Prefix string + + // FetchOwner and StartAfter are currently not used + FetchOwner string + StartAfter string +} + +// ListBucketResult container for listObjects response. +type ListBucketResult struct { + // A response can contain CommonPrefixes only if you have + // specified a delimiter. 
+	CommonPrefixes []CommonPrefix
+	// Metadata about each object returned.
+	Contents  []ObjectInfo
+	Delimiter string
+
+	// Encoding type used to encode object keys in the response.
+	EncodingType string
+
+	// A flag that indicates whether or not ListObjects returned all of the results
+	// that satisfied the search criteria.
+	IsTruncated bool
+	Marker      string
+	MaxKeys     int64
+	Name        string
+
+	// When the response is truncated (the IsTruncated element value in
+	// the response is true), you can use the key name in this field
+	// as the marker in the subsequent request to get the next set of
+	// objects. Object storage lists objects in alphabetical order.
+	// Note: this element is returned only if you have the delimiter
+	// request parameter specified. If the response does not include
+	// NextMarker and it is truncated, you can use the value of the
+	// last Key in the response as the marker in the subsequent
+	// request to get the next set of object keys.
+	NextMarker string
+	Prefix     string
+}
+
+// ListMultipartUploadsResult container for ListMultipartUploads response
+type ListMultipartUploadsResult struct {
+	Bucket             string
+	KeyMarker          string
+	UploadIDMarker     string `xml:"UploadIdMarker"`
+	NextKeyMarker      string
+	NextUploadIDMarker string `xml:"NextUploadIdMarker"`
+	EncodingType       string
+	MaxUploads         int64
+	IsTruncated        bool
+	Uploads            []ObjectMultipartInfo `xml:"Upload"`
+	Prefix             string
+	Delimiter          string
+	// A response can contain CommonPrefixes only if you specify a delimiter.
+	CommonPrefixes []CommonPrefix
+}
+
+// initiator container for who initiated the multipart upload.
+type initiator struct {
+	ID          string
+	DisplayName string
+}
+
+// copyObjectResult container for copy object response.
+type copyObjectResult struct {
+	ETag         string
+	LastModified time.Time // time string format "2006-01-02T15:04:05.000Z"
+}
+
+// ObjectPart container for a particular part of an object.
+type ObjectPart struct {
+	// Part number identifies the part.
+	PartNumber int
+
+	// Date and time the part was uploaded.
+	LastModified time.Time
+
+	// Entity tag returned when the part was uploaded, usually md5sum
+	// of the part.
+	ETag string
+
+	// Size of the uploaded part data.
+	Size int64
+}
+
+// ListObjectPartsResult container for ListObjectParts response.
+type ListObjectPartsResult struct {
+	Bucket   string
+	Key      string
+	UploadID string `xml:"UploadId"`
+
+	Initiator initiator
+	Owner     owner
+
+	StorageClass         string
+	PartNumberMarker     int
+	NextPartNumberMarker int
+	MaxParts             int
+
+	// Indicates whether the returned list of parts is truncated.
+	IsTruncated bool
+	ObjectParts []ObjectPart `xml:"Part"`
+
+	EncodingType string
+}
+
+// initiateMultipartUploadResult container for InitiateMultiPartUpload
+// response.
+type initiateMultipartUploadResult struct {
+	Bucket   string
+	Key      string
+	UploadID string `xml:"UploadId"`
+}
+
+// completeMultipartUploadResult container for completed multipart
+// upload response.
+type completeMultipartUploadResult struct {
+	Location string
+	Bucket   string
+	Key      string
+	ETag     string
+}
+
+// CompletePart sub container lists individual part numbers and their
+// md5sum, part of completeMultipartUpload.
+type CompletePart struct {
+	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"`
+
+	// Part number identifies the part.
+	PartNumber int
+	ETag       string
+}
+
+// completeMultipartUpload container for completing multipart upload.
+type completeMultipartUpload struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"` + Parts []CompletePart `xml:"Part"` +} + +// createBucketConfiguration container for bucket configuration. +type createBucketConfiguration struct { + XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"` + Location string `xml:"LocationConstraint"` +} + +// deleteObject container for Delete element in MultiObjects Delete XML request +type deleteObject struct { + Key string + VersionID string `xml:"VersionId,omitempty"` +} + +// deletedObject container for Deleted element in MultiObjects Delete XML response +type deletedObject struct { + Key string + VersionID string `xml:"VersionId,omitempty"` + // These fields are ignored. + DeleteMarker bool + DeleteMarkerVersionID string +} + +// nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response +type nonDeletedObject struct { + Key string + Code string + Message string +} + +// deletedMultiObjects container for MultiObjects Delete XML request +type deleteMultiObjects struct { + XMLName xml.Name `xml:"Delete"` + Quiet bool + Objects []deleteObject `xml:"Object"` +} + +// deletedMultiObjectsResult container for MultiObjects Delete XML response +type deleteMultiObjectsResult struct { + XMLName xml.Name `xml:"DeleteResult"` + DeletedObjects []deletedObject `xml:"Deleted"` + UnDeletedObjects []nonDeletedObject `xml:"Error"` +} diff --git a/vendor/github.com/minio/minio-go/api-select.go b/vendor/github.com/minio/minio-go/api-select.go new file mode 100644 index 0000000..10e1d47 --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-select.go @@ -0,0 +1,532 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/binary" + "encoding/xml" + "errors" + "fmt" + "hash" + "hash/crc32" + "io" + "net/http" + "net/url" + "strings" + + "github.com/minio/minio-go/pkg/encrypt" + "github.com/minio/minio-go/pkg/s3utils" +) + +// CSVFileHeaderInfo - is the parameter for whether to utilize headers. +type CSVFileHeaderInfo string + +// Constants for file header info. +const ( + CSVFileHeaderInfoNone CSVFileHeaderInfo = "NONE" + CSVFileHeaderInfoIgnore = "IGNORE" + CSVFileHeaderInfoUse = "USE" +) + +// SelectCompressionType - is the parameter for what type of compression is +// present +type SelectCompressionType string + +// Constants for compression types under select API. +const ( + SelectCompressionNONE SelectCompressionType = "NONE" + SelectCompressionGZIP = "GZIP" + SelectCompressionBZIP = "BZIP2" +) + +// CSVQuoteFields - is the parameter for how CSV fields are quoted. +type CSVQuoteFields string + +// Constants for csv quote styles. 
+const ( + CSVQuoteFieldsAlways CSVQuoteFields = "Always" + CSVQuoteFieldsAsNeeded = "AsNeeded" +) + +// QueryExpressionType - is of what syntax the expression is, this should only +// be SQL +type QueryExpressionType string + +// Constants for expression type. +const ( + QueryExpressionTypeSQL QueryExpressionType = "SQL" +) + +// JSONType determines json input serialization type. +type JSONType string + +// Constants for JSONTypes. +const ( + JSONDocumentType JSONType = "DOCUMENT" + JSONLinesType = "LINES" +) + +// ParquetInputOptions parquet input specific options +type ParquetInputOptions struct{} + +// CSVInputOptions csv input specific options +type CSVInputOptions struct { + FileHeaderInfo CSVFileHeaderInfo + RecordDelimiter string + FieldDelimiter string + QuoteCharacter string + QuoteEscapeCharacter string + Comments string +} + +// CSVOutputOptions csv output specific options +type CSVOutputOptions struct { + QuoteFields CSVQuoteFields + RecordDelimiter string + FieldDelimiter string + QuoteCharacter string + QuoteEscapeCharacter string +} + +// JSONInputOptions json input specific options +type JSONInputOptions struct { + Type JSONType +} + +// JSONOutputOptions - json output specific options +type JSONOutputOptions struct { + RecordDelimiter string +} + +// SelectObjectInputSerialization - input serialization parameters +type SelectObjectInputSerialization struct { + CompressionType SelectCompressionType + Parquet *ParquetInputOptions `xml:"Parquet,omitempty"` + CSV *CSVInputOptions `xml:"CSV,omitempty"` + JSON *JSONInputOptions `xml:"JSON,omitempty"` +} + +// SelectObjectOutputSerialization - output serialization parameters. +type SelectObjectOutputSerialization struct { + CSV *CSVOutputOptions `xml:"CSV,omitempty"` + JSON *JSONOutputOptions `xml:"JSON,omitempty"` +} + +// SelectObjectOptions - represents the input select body +type SelectObjectOptions struct { + XMLName xml.Name `xml:"SelectObjectContentRequest" json:"-"` + ServerSideEncryption encrypt.ServerSide `xml:"-"` + Expression string + ExpressionType QueryExpressionType + InputSerialization SelectObjectInputSerialization + OutputSerialization SelectObjectOutputSerialization + RequestProgress struct { + Enabled bool + } +} + +// Header returns the http.Header representation of the SelectObject options. +func (o SelectObjectOptions) Header() http.Header { + headers := make(http.Header) + if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC { + o.ServerSideEncryption.Marshal(headers) + } + return headers +} + +// SelectObjectType - is the parameter which defines what type of object the +// operation is being performed on. +type SelectObjectType string + +// Constants for input data types. +const ( + SelectObjectTypeCSV SelectObjectType = "CSV" + SelectObjectTypeJSON = "JSON" + SelectObjectTypeParquet = "Parquet" +) + +// preludeInfo is used for keeping track of necessary information from the +// prelude. +type preludeInfo struct { + totalLen uint32 + headerLen uint32 +} + +// SelectResults is used for the streaming responses from the server. +type SelectResults struct { + pipeReader *io.PipeReader + resp *http.Response + stats *StatsMessage + progress *ProgressMessage +} + +// ProgressMessage is a struct for progress xml message. +type ProgressMessage struct { + XMLName xml.Name `xml:"Progress" json:"-"` + StatsMessage +} + +// StatsMessage is a struct for stat xml message. 
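+//
+// It decodes from an XML payload of the form below (byte counts are
+// illustrative):
+//
+//	<Stats>
+//	  <BytesScanned>1024</BytesScanned>
+//	  <BytesProcessed>1024</BytesProcessed>
+//	  <BytesReturned>128</BytesReturned>
+//	</Stats>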
+type StatsMessage struct { + XMLName xml.Name `xml:"Stats" json:"-"` + BytesScanned int64 + BytesProcessed int64 + BytesReturned int64 +} + +// messageType represents the type of message. +type messageType string + +const ( + errorMsg messageType = "error" + commonMsg = "event" +) + +// eventType represents the type of event. +type eventType string + +// list of event-types returned by Select API. +const ( + endEvent eventType = "End" + recordsEvent = "Records" + progressEvent = "Progress" + statsEvent = "Stats" +) + +// contentType represents content type of event. +type contentType string + +const ( + xmlContent contentType = "text/xml" +) + +// SelectObjectContent is a implementation of http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html AWS S3 API. +func (c Client) SelectObjectContent(ctx context.Context, bucketName, objectName string, opts SelectObjectOptions) (*SelectResults, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return nil, err + } + + selectReqBytes, err := xml.Marshal(opts) + if err != nil { + return nil, err + } + + urlValues := make(url.Values) + urlValues.Set("select", "") + urlValues.Set("select-type", "2") + + // Execute POST on bucket/object. + resp, err := c.executeMethod(ctx, "POST", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + customHeader: opts.Header(), + contentMD5Base64: sumMD5Base64(selectReqBytes), + contentSHA256Hex: sum256Hex(selectReqBytes), + contentBody: bytes.NewReader(selectReqBytes), + contentLength: int64(len(selectReqBytes)), + }) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, "") + } + + pipeReader, pipeWriter := io.Pipe() + streamer := &SelectResults{ + resp: resp, + stats: &StatsMessage{}, + progress: &ProgressMessage{}, + pipeReader: pipeReader, + } + streamer.start(pipeWriter) + return streamer, nil +} + +// Close - closes the underlying response body and the stream reader. +func (s *SelectResults) Close() error { + defer closeResponse(s.resp) + return s.pipeReader.Close() +} + +// Read - is a reader compatible implementation for SelectObjectContent records. +func (s *SelectResults) Read(b []byte) (n int, err error) { + return s.pipeReader.Read(b) +} + +// Stats - information about a request's stats when processing is complete. +func (s *SelectResults) Stats() *StatsMessage { + return s.stats +} + +// Progress - information about the progress of a request. +func (s *SelectResults) Progress() *ProgressMessage { + return s.progress +} + +// start is the main function that decodes the large byte array into +// several events that are sent through the eventstream. +func (s *SelectResults) start(pipeWriter *io.PipeWriter) { + go func() { + for { + var prelude preludeInfo + var headers = make(http.Header) + var err error + + // Create CRC code + crc := crc32.New(crc32.IEEETable) + crcReader := io.TeeReader(s.resp.Body, crc) + + // Extract the prelude(12 bytes) into a struct to extract relevant information. 
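+			//
+			// Each event-stream message is framed as
+			//
+			//	[4B total length][4B header length][4B prelude CRC]
+			//	[headers][payload][4B message CRC]
+			//
+			// so the payload spans total length - header length - 16
+			// bytes (see PayloadLen below).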
+ prelude, err = processPrelude(crcReader, crc) + if err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + + // Extract the headers(variable bytes) into a struct to extract relevant information + if prelude.headerLen > 0 { + if err = extractHeader(io.LimitReader(crcReader, int64(prelude.headerLen)), headers); err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + } + + // Get the actual payload length so that the appropriate amount of + // bytes can be read or parsed. + payloadLen := prelude.PayloadLen() + + m := messageType(headers.Get("message-type")) + + switch m { + case errorMsg: + pipeWriter.CloseWithError(errors.New("Error Type of " + headers.Get("error-type") + " " + headers.Get("error-message"))) + closeResponse(s.resp) + return + case commonMsg: + // Get content-type of the payload. + c := contentType(headers.Get("content-type")) + + // Get event type of the payload. + e := eventType(headers.Get("event-type")) + + // Handle all supported events. + switch e { + case endEvent: + pipeWriter.Close() + closeResponse(s.resp) + return + case recordsEvent: + if _, err = io.Copy(pipeWriter, io.LimitReader(crcReader, payloadLen)); err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + case progressEvent: + switch c { + case xmlContent: + if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.progress); err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + default: + pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, progressEvent)) + closeResponse(s.resp) + return + } + case statsEvent: + switch c { + case xmlContent: + if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.stats); err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + default: + pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, statsEvent)) + closeResponse(s.resp) + return + } + } + } + + // Ensures that the full message's CRC is correct and + // that the message is not corrupted + if err := checkCRC(s.resp.Body, crc.Sum32()); err != nil { + pipeWriter.CloseWithError(err) + closeResponse(s.resp) + return + } + + } + }() +} + +// PayloadLen is a function that calculates the length of the payload. +func (p preludeInfo) PayloadLen() int64 { + return int64(p.totalLen - p.headerLen - 16) +} + +// processPrelude is the function that reads the 12 bytes of the prelude and +// ensures the CRC is correct while also extracting relevant information into +// the struct, +func processPrelude(prelude io.Reader, crc hash.Hash32) (preludeInfo, error) { + var err error + var pInfo = preludeInfo{} + + // reads total length of the message (first 4 bytes) + pInfo.totalLen, err = extractUint32(prelude) + if err != nil { + return pInfo, err + } + + // reads total header length of the message (2nd 4 bytes) + pInfo.headerLen, err = extractUint32(prelude) + if err != nil { + return pInfo, err + } + + // checks that the CRC is correct (3rd 4 bytes) + preCRC := crc.Sum32() + if err := checkCRC(prelude, preCRC); err != nil { + return pInfo, err + } + + return pInfo, nil +} + +// extracts the relevant information from the Headers. 
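+//
+// Each header is encoded on the wire as
+//
+//	[1B name length][name][1B value type][2B value length][value]
+//
+// with the value type always 7 (string) for the headers handled here.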
+func extractHeader(body io.Reader, myHeaders http.Header) error {
+	for {
+		// extracts the first part of the header: the header name.
+		headerTypeName, err := extractHeaderType(body)
+		if err != nil {
+			// Since end of file, we have read all of our headers
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+
+		// reads the header value type (always 7, i.e. string) and ignores it.
+		extractUint8(body)
+
+		headerValueName, err := extractHeaderValue(body)
+		if err != nil {
+			return err
+		}
+
+		myHeaders.Set(headerTypeName, headerValueName)
+
+	}
+	return nil
+}
+
+// extractHeaderType extracts the first half of the header message, the header type.
+func extractHeaderType(body io.Reader) (string, error) {
+	// extracts the 1 byte length of the header name
+	headerNameLen, err := extractUint8(body)
+	if err != nil {
+		return "", err
+	}
+	// extracts the string with the appropriate number of bytes
+	headerName, err := extractString(body, int(headerNameLen))
+	if err != nil {
+		return "", err
+	}
+	return strings.TrimPrefix(headerName, ":"), nil
+}
+
+// extractHeaderValue extracts the second half of the header message, the
+// header value
+func extractHeaderValue(body io.Reader) (string, error) {
+	bodyLen, err := extractUint16(body)
+	if err != nil {
+		return "", err
+	}
+	bodyName, err := extractString(body, int(bodyLen))
+	if err != nil {
+		return "", err
+	}
+	return bodyName, nil
+}
+
+// extracts a string of a particular number of bytes from the reader.
+func extractString(source io.Reader, lenBytes int) (string, error) {
+	myVal := make([]byte, lenBytes)
+	// Read exactly lenBytes; a bare Read may legally return fewer bytes.
+	_, err := io.ReadFull(source, myVal)
+	if err != nil {
+		return "", err
+	}
+	return string(myVal), nil
+}
+
+// extractUint32 extracts a 4 byte integer from the byte array.
+func extractUint32(r io.Reader) (uint32, error) {
+	buf := make([]byte, 4)
+	_, err := io.ReadFull(r, buf)
+	if err != nil {
+		return 0, err
+	}
+	return binary.BigEndian.Uint32(buf), nil
+}
+
+// extractUint16 extracts a 2 byte integer from the byte array.
+func extractUint16(r io.Reader) (uint16, error) {
+	buf := make([]byte, 2)
+	_, err := io.ReadFull(r, buf)
+	if err != nil {
+		return 0, err
+	}
+	return binary.BigEndian.Uint16(buf), nil
+}
+
+// extractUint8 extracts a 1 byte integer from the byte array.
+func extractUint8(r io.Reader) (uint8, error) {
+	buf := make([]byte, 1)
+	_, err := io.ReadFull(r, buf)
+	if err != nil {
+		return 0, err
+	}
+	return buf[0], nil
+}
+
+// checkCRC ensures that the CRC matches with the one from the reader.
+func checkCRC(r io.Reader, expect uint32) error {
+	msgCRC, err := extractUint32(r)
+	if err != nil {
+		return err
+	}
+
+	if msgCRC != expect {
+		return fmt.Errorf("Checksum Mismatch, MessageCRC of 0x%X does not equal expected CRC of 0x%X", msgCRC, expect)
+	}
+	return nil
+}
diff --git a/vendor/github.com/minio/minio-go/api-stat.go b/vendor/github.com/minio/minio-go/api-stat.go
new file mode 100644
index 0000000..91e9d39
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api-stat.go
@@ -0,0 +1,185 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" + + "github.com/minio/minio-go/pkg/s3utils" +) + +// BucketExists verify if bucket exists and you have permission to access it. +func (c Client) BucketExists(bucketName string) (bool, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return false, err + } + + // Execute HEAD on bucketName. + resp, err := c.executeMethod(context.Background(), "HEAD", requestMetadata{ + bucketName: bucketName, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + if ToErrorResponse(err).Code == "NoSuchBucket" { + return false, nil + } + return false, err + } + if resp != nil { + resperr := httpRespToErrorResponse(resp, bucketName, "") + if ToErrorResponse(resperr).Code == "NoSuchBucket" { + return false, nil + } + if resp.StatusCode != http.StatusOK { + return false, httpRespToErrorResponse(resp, bucketName, "") + } + } + return true, nil +} + +// List of header keys to be filtered, usually +// from all S3 API http responses. +var defaultFilterKeys = []string{ + "Connection", + "Transfer-Encoding", + "Accept-Ranges", + "Date", + "Server", + "Vary", + "x-amz-bucket-region", + "x-amz-request-id", + "x-amz-id-2", + "Content-Security-Policy", + "X-Xss-Protection", + + // Add new headers to be ignored. +} + +// Extract only necessary metadata header key/values by +// filtering them out with a list of custom header keys. +func extractObjMetadata(header http.Header) http.Header { + filterKeys := append([]string{ + "ETag", + "Content-Length", + "Last-Modified", + "Content-Type", + }, defaultFilterKeys...) + return filterHeader(header, filterKeys) +} + +// StatObject verifies if object exists and you have permission to access. +func (c Client) StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ObjectInfo{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return ObjectInfo{}, err + } + return c.statObject(context.Background(), bucketName, objectName, opts) +} + +// Lower level API for statObject supporting pre-conditions and range headers. +func (c Client) statObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return ObjectInfo{}, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return ObjectInfo{}, err + } + + // Execute HEAD on objectName. + resp, err := c.executeMethod(ctx, "HEAD", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + contentSHA256Hex: emptySHA256Hex, + customHeader: opts.Header(), + }) + defer closeResponse(resp) + if err != nil { + return ObjectInfo{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { + return ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + // Trim off the odd double quotes from ETag in the beginning and end. 
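+	// e.g. a raw header of ETag: "d41d8cd98f00b204e9800998ecf8427e" (quotes
+	// included) becomes d41d8cd98f00b204e9800998ecf8427e.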
+	md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
+	md5sum = strings.TrimSuffix(md5sum, "\"")
+
+	// Parse content length if it exists.
+	var size int64 = -1
+	contentLengthStr := resp.Header.Get("Content-Length")
+	if contentLengthStr != "" {
+		size, err = strconv.ParseInt(contentLengthStr, 10, 64)
+		if err != nil {
+			// Content-Length is not valid
+			return ObjectInfo{}, ErrorResponse{
+				Code:       "InternalError",
+				Message:    "Content-Length is invalid. " + reportIssue,
+				BucketName: bucketName,
+				Key:        objectName,
+				RequestID:  resp.Header.Get("x-amz-request-id"),
+				HostID:     resp.Header.Get("x-amz-id-2"),
+				Region:     resp.Header.Get("x-amz-bucket-region"),
+			}
+		}
+	}
+
+	// Parse Last-Modified, which uses the HTTP time format.
+	date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified"))
+	if err != nil {
+		return ObjectInfo{}, ErrorResponse{
+			Code:       "InternalError",
+			Message:    "Last-Modified time format is invalid. " + reportIssue,
+			BucketName: bucketName,
+			Key:        objectName,
+			RequestID:  resp.Header.Get("x-amz-request-id"),
+			HostID:     resp.Header.Get("x-amz-id-2"),
+			Region:     resp.Header.Get("x-amz-bucket-region"),
+		}
+	}
+
+	// Fetch content type if any present.
+	contentType := strings.TrimSpace(resp.Header.Get("Content-Type"))
+	if contentType == "" {
+		contentType = "application/octet-stream"
+	}
+
+	// Save object metadata info.
+	return ObjectInfo{
+		ETag:         md5sum,
+		Key:          objectName,
+		Size:         size,
+		LastModified: date,
+		ContentType:  contentType,
+		// Extract only the relevant header keys describing the object.
+		// The following call filters out a standard set of keys
+		// which are not part of object metadata.
+		Metadata: extractObjMetadata(resp.Header),
+	}, nil
+}
diff --git a/vendor/github.com/minio/minio-go/api.go b/vendor/github.com/minio/minio-go/api.go
new file mode 100644
index 0000000..f1c5490
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/api.go
@@ -0,0 +1,898 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2018 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+	"bytes"
+	"context"
+	"crypto/md5"
+	"crypto/sha256"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"net"
+	"net/http"
+	"net/http/cookiejar"
+	"net/http/httputil"
+	"net/url"
+	"os"
+	"runtime"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/publicsuffix"
+
+	"github.com/minio/minio-go/pkg/credentials"
+	"github.com/minio/minio-go/pkg/s3signer"
+	"github.com/minio/minio-go/pkg/s3utils"
+)
+
+// Client implements Amazon S3 compatible methods.
+type Client struct {
+	/// Standard options.
+
+	// Parsed endpoint url provided by the user.
+	endpointURL *url.URL
+
+	// Holds various credential providers.
+	credsProvider *credentials.Credentials
+
+	// Custom signerType value overrides all credentials.
+	overrideSignerType credentials.SignatureType
+
+	// User supplied.
+ appInfo struct { + appName string + appVersion string + } + + // Indicate whether we are using https or not + secure bool + + // Needs allocation. + httpClient *http.Client + bucketLocCache *bucketLocationCache + + // Advanced functionality. + isTraceEnabled bool + traceOutput io.Writer + + // S3 specific accelerated endpoint. + s3AccelerateEndpoint string + + // Region endpoint + region string + + // Random seed. + random *rand.Rand + + // lookup indicates type of url lookup supported by server. If not specified, + // default to Auto. + lookup BucketLookupType +} + +// Options for New method +type Options struct { + Creds *credentials.Credentials + Secure bool + Region string + BucketLookup BucketLookupType + // Add future fields here +} + +// Global constants. +const ( + libraryName = "minio-go" + libraryVersion = "v6.0.14" +) + +// User Agent should always following the below style. +// Please open an issue to discuss any new changes here. +// +// Minio (OS; ARCH) LIB/VER APP/VER +const ( + libraryUserAgentPrefix = "Minio (" + runtime.GOOS + "; " + runtime.GOARCH + ") " + libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion +) + +// BucketLookupType is type of url lookup supported by server. +type BucketLookupType int + +// Different types of url lookup supported by the server.Initialized to BucketLookupAuto +const ( + BucketLookupAuto BucketLookupType = iota + BucketLookupDNS + BucketLookupPath +) + +// NewV2 - instantiate minio client with Amazon S3 signature version +// '2' compatibility. +func NewV2(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { + creds := credentials.NewStaticV2(accessKeyID, secretAccessKey, "") + clnt, err := privateNew(endpoint, creds, secure, "", BucketLookupAuto) + if err != nil { + return nil, err + } + clnt.overrideSignerType = credentials.SignatureV2 + return clnt, nil +} + +// NewV4 - instantiate minio client with Amazon S3 signature version +// '4' compatibility. +func NewV4(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { + creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "") + clnt, err := privateNew(endpoint, creds, secure, "", BucketLookupAuto) + if err != nil { + return nil, err + } + clnt.overrideSignerType = credentials.SignatureV4 + return clnt, nil +} + +// New - instantiate minio client, adds automatic verification of signature. +func New(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { + creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "") + clnt, err := privateNew(endpoint, creds, secure, "", BucketLookupAuto) + if err != nil { + return nil, err + } + // Google cloud storage should be set to signature V2, force it if not. + if s3utils.IsGoogleEndpoint(*clnt.endpointURL) { + clnt.overrideSignerType = credentials.SignatureV2 + } + // If Amazon S3 set to signature v4. + if s3utils.IsAmazonEndpoint(*clnt.endpointURL) { + clnt.overrideSignerType = credentials.SignatureV4 + } + return clnt, nil +} + +// NewWithCredentials - instantiate minio client with credentials provider +// for retrieving credentials from various credentials provider such as +// IAM, File, Env etc. +func NewWithCredentials(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) { + return privateNew(endpoint, creds, secure, region, BucketLookupAuto) +} + +// NewWithRegion - instantiate minio client, with region configured. 
Unlike New(),
+// NewWithRegion avoids bucket-location lookup operations and is slightly faster.
+// Use this function when your application deals with a single region.
+func NewWithRegion(endpoint, accessKeyID, secretAccessKey string, secure bool, region string) (*Client, error) {
+	creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "")
+	return privateNew(endpoint, creds, secure, region, BucketLookupAuto)
+}
+
+// NewWithOptions - instantiate minio client with options
+func NewWithOptions(endpoint string, opts *Options) (*Client, error) {
+	return privateNew(endpoint, opts.Creds, opts.Secure, opts.Region, opts.BucketLookup)
+}
+
+// lockedRandSource provides protected rand source, implements rand.Source interface.
+type lockedRandSource struct {
+	lk  sync.Mutex
+	src rand.Source
+}
+
+// Int63 returns a non-negative pseudo-random 63-bit integer as an int64.
+func (r *lockedRandSource) Int63() (n int64) {
+	r.lk.Lock()
+	n = r.src.Int63()
+	r.lk.Unlock()
+	return
+}
+
+// Seed uses the provided seed value to initialize the generator to a
+// deterministic state.
+func (r *lockedRandSource) Seed(seed int64) {
+	r.lk.Lock()
+	r.src.Seed(seed)
+	r.lk.Unlock()
+}
+
+// Redirect requests by re-signing the request.
+func (c *Client) redirectHeaders(req *http.Request, via []*http.Request) error {
+	if len(via) >= 5 {
+		return errors.New("stopped after 5 redirects")
+	}
+	if len(via) == 0 {
+		return nil
+	}
+	lastRequest := via[len(via)-1]
+	var reAuth bool
+	for attr, val := range lastRequest.Header {
+		// if hosts do not match do not copy Authorization header
+		if attr == "Authorization" && req.Host != lastRequest.Host {
+			reAuth = true
+			continue
+		}
+		if _, ok := req.Header[attr]; !ok {
+			req.Header[attr] = val
+		}
+	}
+
+	*c.endpointURL = *req.URL
+
+	value, err := c.credsProvider.Get()
+	if err != nil {
+		return err
+	}
+	var (
+		signerType      = value.SignerType
+		accessKeyID     = value.AccessKeyID
+		secretAccessKey = value.SecretAccessKey
+		sessionToken    = value.SessionToken
+		region          = c.region
+	)
+
+	// Custom signer set then override the behavior.
+	if c.overrideSignerType != credentials.SignatureDefault {
+		signerType = c.overrideSignerType
+	}
+
+	// If signerType returned by credentials helper is anonymous,
+	// then do not sign regardless of signerType override.
+	if value.SignerType == credentials.SignatureAnonymous {
+		signerType = credentials.SignatureAnonymous
+	}
+
+	if reAuth {
+		// Check if there is no region override, if not get it from the URL if possible.
+		if region == "" {
+			region = s3utils.GetRegionFromURL(*c.endpointURL)
+		}
+		switch {
+		case signerType.IsV2():
+			return errors.New("signature V2 cannot support redirection")
+		case signerType.IsV4():
+			req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, getDefaultLocation(*c.endpointURL, region))
+		}
+	}
+	return nil
+}
+
+func privateNew(endpoint string, creds *credentials.Credentials, secure bool, region string, lookup BucketLookupType) (*Client, error) {
+	// construct endpoint.
+	endpointURL, err := getEndpointURL(endpoint, secure)
+	if err != nil {
+		return nil, err
+	}
+
+	// Initialize cookies to preserve server sent cookies if any and replay
+	// them upon each request.
+	jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
+	if err != nil {
+		return nil, err
+	}
+
+	// instantiate new Client.
+	clnt := new(Client)
+
+	// Save the credentials.
+ clnt.credsProvider = creds + + // Remember whether we are using https or not + clnt.secure = secure + + // Save endpoint URL, user agent for future uses. + clnt.endpointURL = endpointURL + + // Instantiate http client and bucket location cache. + clnt.httpClient = &http.Client{ + Jar: jar, + Transport: DefaultTransport, + CheckRedirect: clnt.redirectHeaders, + } + + // Sets custom region, if region is empty bucket location cache is used automatically. + if region == "" { + region = s3utils.GetRegionFromURL(*clnt.endpointURL) + } + clnt.region = region + + // Instantiate bucket location cache. + clnt.bucketLocCache = newBucketLocationCache() + + // Introduce a new locked random seed. + clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())}) + + // Sets bucket lookup style, whether server accepts DNS or Path lookup. Default is Auto - determined + // by the SDK. When Auto is specified, DNS lookup is used for Amazon/Google cloud endpoints and Path for all other endpoints. + clnt.lookup = lookup + // Return. + return clnt, nil +} + +// SetAppInfo - add application details to user agent. +func (c *Client) SetAppInfo(appName string, appVersion string) { + // if app name and version not set, we do not set a new user agent. + if appName != "" && appVersion != "" { + c.appInfo = struct { + appName string + appVersion string + }{} + c.appInfo.appName = appName + c.appInfo.appVersion = appVersion + } +} + +// SetCustomTransport - set new custom transport. +func (c *Client) SetCustomTransport(customHTTPTransport http.RoundTripper) { + // Set this to override default transport + // ``http.DefaultTransport``. + // + // This transport is usually needed for debugging OR to add your + // own custom TLS certificates on the client transport, for custom + // CA's and certs which are not part of standard certificate + // authority follow this example :- + // + // tr := &http.Transport{ + // TLSClientConfig: &tls.Config{RootCAs: pool}, + // DisableCompression: true, + // } + // api.SetCustomTransport(tr) + // + if c.httpClient != nil { + c.httpClient.Transport = customHTTPTransport + } +} + +// TraceOn - enable HTTP tracing. +func (c *Client) TraceOn(outputStream io.Writer) { + // if outputStream is nil then default to os.Stdout. + if outputStream == nil { + outputStream = os.Stdout + } + // Sets a new output stream. + c.traceOutput = outputStream + + // Enable tracing. + c.isTraceEnabled = true +} + +// TraceOff - disable HTTP tracing. +func (c *Client) TraceOff() { + // Disable tracing. + c.isTraceEnabled = false +} + +// SetS3TransferAccelerate - turns s3 accelerated endpoint on or off for all your +// requests. This feature is only specific to S3 for all other endpoints this +// function does nothing. To read further details on s3 transfer acceleration +// please vist - +// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html +func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) { + if s3utils.IsAmazonEndpoint(*c.endpointURL) { + c.s3AccelerateEndpoint = accelerateEndpoint + } +} + +// Hash materials provides relevant initialized hash algo writers +// based on the expected signature type. +// +// - For signature v4 request if the connection is insecure compute only sha256. +// - For signature v4 request if the connection is secure compute only md5. +// - For anonymous request compute md5. 
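+//
+// The sha256 sum feeds the V4 payload hash; over TLS the payload can instead
+// be sent as unsigned (see the unsignedPayload usage elsewhere in this
+// package), so only an md5 checksum is kept for Content-Md5 integrity.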
+func (c *Client) hashMaterials() (hashAlgos map[string]hash.Hash, hashSums map[string][]byte) {
+	hashSums = make(map[string][]byte)
+	hashAlgos = make(map[string]hash.Hash)
+	if c.overrideSignerType.IsV4() {
+		if c.secure {
+			hashAlgos["md5"] = md5.New()
+		} else {
+			hashAlgos["sha256"] = sha256.New()
+		}
+	} else {
+		if c.overrideSignerType.IsAnonymous() {
+			hashAlgos["md5"] = md5.New()
+		}
+	}
+	return hashAlgos, hashSums
+}
+
+// requestMetadata - is container for all the values to make a request.
+type requestMetadata struct {
+	// If set newRequest presigns the URL.
+	presignURL bool
+
+	// User supplied.
+	bucketName   string
+	objectName   string
+	queryValues  url.Values
+	customHeader http.Header
+	expires      int64
+
+	// Generated by our internal code.
+	bucketLocation   string
+	contentBody      io.Reader
+	contentLength    int64
+	contentMD5Base64 string // carries base64 encoded md5sum
+	contentSHA256Hex string // carries hex encoded sha256sum
+}
+
+// dumpHTTP - dump HTTP request and response.
+func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
+	// Starts http dump.
+	_, err := fmt.Fprintln(c.traceOutput, "---------START-HTTP---------")
+	if err != nil {
+		return err
+	}
+
+	// Filter out Signature field from Authorization header.
+	origAuth := req.Header.Get("Authorization")
+	if origAuth != "" {
+		req.Header.Set("Authorization", redactSignature(origAuth))
+	}
+
+	// Only display request header.
+	reqTrace, err := httputil.DumpRequestOut(req, false)
+	if err != nil {
+		return err
+	}
+
+	// Write request to trace output.
+	_, err = fmt.Fprint(c.traceOutput, string(reqTrace))
+	if err != nil {
+		return err
+	}
+
+	// Only display response header.
+	var respTrace []byte
+
+	// For errors we make sure to dump response body as well.
+	if resp.StatusCode != http.StatusOK &&
+		resp.StatusCode != http.StatusPartialContent &&
+		resp.StatusCode != http.StatusNoContent {
+		respTrace, err = httputil.DumpResponse(resp, true)
+		if err != nil {
+			return err
+		}
+	} else {
+		respTrace, err = httputil.DumpResponse(resp, false)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Write response to trace output.
+	_, err = fmt.Fprint(c.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n"))
+	if err != nil {
+		return err
+	}
+
+	// Ends the http dump.
+	_, err = fmt.Fprintln(c.traceOutput, "---------END-HTTP---------")
+	if err != nil {
+		return err
+	}
+
+	// Returns success.
+	return nil
+}
+
+// do - execute http request.
+func (c Client) do(req *http.Request) (*http.Response, error) {
+	resp, err := c.httpClient.Do(req)
+	if err != nil {
+		// Handle this specifically for now until future Golang versions fix this issue properly.
+		if urlErr, ok := err.(*url.Error); ok {
+			if strings.Contains(urlErr.Err.Error(), "EOF") {
+				return nil, &url.Error{
+					Op:  urlErr.Op,
+					URL: urlErr.URL,
+					Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."),
+				}
+			}
+		}
+		return nil, err
+	}
+
+	// The response should never be nil here; report an error if it is.
+	if resp == nil {
+		msg := "Response is empty. " + reportIssue
+		return nil, ErrInvalidArgument(msg)
+	}
+
+	// If trace is enabled, dump http request and response.
+	if c.isTraceEnabled {
+		err = c.dumpHTTP(req, resp)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return resp, nil
+}
+
+// List of success status.
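+// Any other status code sends executeMethod below into its error-inspection
+// and retry path.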
+var successStatus = []int{ + http.StatusOK, + http.StatusNoContent, + http.StatusPartialContent, +} + +// executeMethod - instantiates a given method, and retries the +// request upon any error up to maxRetries attempts in a binomially +// delayed manner using a standard back off algorithm. +func (c Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) { + var isRetryable bool // Indicates if request can be retried. + var bodySeeker io.Seeker // Extracted seeker from io.Reader. + var reqRetry = MaxRetry // Indicates how many times we can retry the request + + if metadata.contentBody != nil { + // Check if body is seekable then it is retryable. + bodySeeker, isRetryable = metadata.contentBody.(io.Seeker) + switch bodySeeker { + case os.Stdin, os.Stdout, os.Stderr: + isRetryable = false + } + // Retry only when reader is seekable + if !isRetryable { + reqRetry = 1 + } + + // Figure out if the body can be closed - if yes + // we will definitely close it upon the function + // return. + bodyCloser, ok := metadata.contentBody.(io.Closer) + if ok { + defer bodyCloser.Close() + } + } + + // Create a done channel to control 'newRetryTimer' go routine. + doneCh := make(chan struct{}, 1) + + // Indicate to our routine to exit cleanly upon return. + defer close(doneCh) + + // Blank indentifier is kept here on purpose since 'range' without + // blank identifiers is only supported since go1.4 + // https://golang.org/doc/go1.4#forrange. + for range c.newRetryTimer(reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) { + // Retry executes the following function body if request has an + // error until maxRetries have been exhausted, retry attempts are + // performed after waiting for a given period of time in a + // binomial fashion. + if isRetryable { + // Seek back to beginning for each attempt. + if _, err = bodySeeker.Seek(0, 0); err != nil { + // If seek failed, no need to retry. + return nil, err + } + } + + // Instantiate a new request. + var req *http.Request + req, err = c.newRequest(method, metadata) + if err != nil { + errResponse := ToErrorResponse(err) + if isS3CodeRetryable(errResponse.Code) { + continue // Retry. + } + return nil, err + } + + // Add context to request + req = req.WithContext(ctx) + + // Initiate the request. + res, err = c.do(req) + if err != nil { + // For supported http requests errors verify. + if isHTTPReqErrorRetryable(err) { + continue // Retry. + } + // For other errors, return here no need to retry. + return nil, err + } + + // For any known successful http status, return quickly. + for _, httpStatus := range successStatus { + if httpStatus == res.StatusCode { + return res, nil + } + } + + // Read the body to be saved later. + errBodyBytes, err := ioutil.ReadAll(res.Body) + // res.Body should be closed + closeResponse(res) + if err != nil { + return nil, err + } + + // Save the body. + errBodySeeker := bytes.NewReader(errBodyBytes) + res.Body = ioutil.NopCloser(errBodySeeker) + + // For errors verify if its retryable otherwise fail quickly. + errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName)) + + // Save the body back again. + errBodySeeker.Seek(0, 0) // Seek back to starting point. + res.Body = ioutil.NopCloser(errBodySeeker) + + // Bucket region if set in error response and the error + // code dictates invalid region, we can retry the request + // with the new region. 
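+		// (An AuthorizationHeaderMalformed error response, for example,
+		// carries the region the server expected in its Region field.)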
+ // + // Additionally we should only retry if bucketLocation and custom + // region is empty. + if metadata.bucketLocation == "" && c.region == "" { + if errResponse.Code == "AuthorizationHeaderMalformed" || errResponse.Code == "InvalidRegion" { + if metadata.bucketName != "" && errResponse.Region != "" { + // Gather Cached location only if bucketName is present. + if _, cachedLocationError := c.bucketLocCache.Get(metadata.bucketName); cachedLocationError != false { + c.bucketLocCache.Set(metadata.bucketName, errResponse.Region) + continue // Retry. + } + } + } + } + + // Verify if error response code is retryable. + if isS3CodeRetryable(errResponse.Code) { + continue // Retry. + } + + // Verify if http status code is retryable. + if isHTTPStatusRetryable(res.StatusCode) { + continue // Retry. + } + + // For all other cases break out of the retry loop. + break + } + return res, err +} + +// newRequest - instantiate a new HTTP request for a given method. +func (c Client) newRequest(method string, metadata requestMetadata) (req *http.Request, err error) { + // If no method is supplied default to 'POST'. + if method == "" { + method = "POST" + } + + location := metadata.bucketLocation + if location == "" { + if metadata.bucketName != "" { + // Gather location only if bucketName is present. + location, err = c.getBucketLocation(metadata.bucketName) + if err != nil { + if ToErrorResponse(err).Code != "AccessDenied" { + return nil, err + } + } + // Upon AccessDenied error on fetching bucket location, default + // to possible locations based on endpoint URL. This can usually + // happen when GetBucketLocation() is disabled using IAM policies. + } + if location == "" { + location = getDefaultLocation(*c.endpointURL, c.region) + } + } + + // Look if target url supports virtual host. + isVirtualHost := c.isVirtualHostStyleRequest(*c.endpointURL, metadata.bucketName) + + // Construct a new target URL. + targetURL, err := c.makeTargetURL(metadata.bucketName, metadata.objectName, location, isVirtualHost, metadata.queryValues) + if err != nil { + return nil, err + } + + // Initialize a new HTTP request for the method. + req, err = http.NewRequest(method, targetURL.String(), nil) + if err != nil { + return nil, err + } + + // Get credentials from the configured credentials provider. + value, err := c.credsProvider.Get() + if err != nil { + return nil, err + } + + var ( + signerType = value.SignerType + accessKeyID = value.AccessKeyID + secretAccessKey = value.SecretAccessKey + sessionToken = value.SessionToken + ) + + // Custom signer set then override the behavior. + if c.overrideSignerType != credentials.SignatureDefault { + signerType = c.overrideSignerType + } + + // If signerType returned by credentials helper is anonymous, + // then do not sign regardless of signerType override. + if value.SignerType == credentials.SignatureAnonymous { + signerType = credentials.SignatureAnonymous + } + + // Generate presign url if needed, return right here. + if metadata.expires != 0 && metadata.presignURL { + if signerType.IsAnonymous() { + return nil, ErrInvalidArgument("Presigned URLs cannot be generated with anonymous credentials.") + } + if signerType.IsV2() { + // Presign URL with signature v2. + req = s3signer.PreSignV2(*req, accessKeyID, secretAccessKey, metadata.expires, isVirtualHost) + } else if signerType.IsV4() { + // Presign URL with signature v4. 
+ req = s3signer.PreSignV4(*req, accessKeyID, secretAccessKey, sessionToken, location, metadata.expires) + } + return req, nil + } + + // Set 'User-Agent' header for the request. + c.setUserAgent(req) + + // Set all headers. + for k, v := range metadata.customHeader { + req.Header.Set(k, v[0]) + } + + // Go net/http notoriously closes the request body. + // - The request Body, if non-nil, will be closed by the underlying Transport, even on errors. + // This can cause underlying *os.File seekers to fail, avoid that + // by making sure to wrap the closer as a nop. + if metadata.contentLength == 0 { + req.Body = nil + } else { + req.Body = ioutil.NopCloser(metadata.contentBody) + } + + // Set incoming content-length. + req.ContentLength = metadata.contentLength + if req.ContentLength <= -1 { + // For unknown content length, we upload using transfer-encoding: chunked. + req.TransferEncoding = []string{"chunked"} + } + + // set md5Sum for content protection. + if len(metadata.contentMD5Base64) > 0 { + req.Header.Set("Content-Md5", metadata.contentMD5Base64) + } + + // For anonymous requests just return. + if signerType.IsAnonymous() { + return req, nil + } + + switch { + case signerType.IsV2(): + // Add signature version '2' authorization header. + req = s3signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) + case metadata.objectName != "" && method == "PUT" && metadata.customHeader.Get("X-Amz-Copy-Source") == "" && !c.secure: + // Streaming signature is used by default for a PUT object request. Additionally we also + // look if the initialized client is secure, if yes then we don't need to perform + // streaming signature. + req = s3signer.StreamingSignV4(req, accessKeyID, + secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC()) + default: + // Set sha256 sum for signature calculation only with signature version '4'. + shaHeader := unsignedPayload + if metadata.contentSHA256Hex != "" { + shaHeader = metadata.contentSHA256Hex + } + req.Header.Set("X-Amz-Content-Sha256", shaHeader) + + // Add signature version '4' authorization header. + req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location) + } + + // Return request. + return req, nil +} + +// set User agent. +func (c Client) setUserAgent(req *http.Request) { + req.Header.Set("User-Agent", libraryUserAgent) + if c.appInfo.appName != "" && c.appInfo.appVersion != "" { + req.Header.Set("User-Agent", libraryUserAgent+" "+c.appInfo.appName+"/"+c.appInfo.appVersion) + } +} + +// makeTargetURL make a new target url. +func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, isVirtualHostStyle bool, queryValues url.Values) (*url.URL, error) { + host := c.endpointURL.Host + // For Amazon S3 endpoint, try to fetch location based endpoint. + if s3utils.IsAmazonEndpoint(*c.endpointURL) { + if c.s3AccelerateEndpoint != "" && bucketName != "" { + // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html + // Disable transfer acceleration for non-compliant bucket names. + if strings.Contains(bucketName, ".") { + return nil, ErrTransferAccelerationBucket(bucketName) + } + // If transfer acceleration is requested set new host. + // For more details about enabling transfer acceleration read here. + // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html + host = c.s3AccelerateEndpoint + } else { + // Do not change the host if the endpoint URL is a FIPS S3 endpoint. 
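+			// (FIPS endpoints, e.g. s3-fips.us-gov-west-1.amazonaws.com,
+			// are pinned to a region and must not be rewritten.)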
+ if !s3utils.IsAmazonFIPSEndpoint(*c.endpointURL) { + // Fetch new host based on the bucket location. + host = getS3Endpoint(bucketLocation) + } + } + } + + // Save scheme. + scheme := c.endpointURL.Scheme + + // Strip port 80 and 443 so we won't send these ports in Host header. + // The reason is that browsers and curl automatically remove :80 and :443 + // with the generated presigned urls, then a signature mismatch error. + if h, p, err := net.SplitHostPort(host); err == nil { + if scheme == "http" && p == "80" || scheme == "https" && p == "443" { + host = h + } + } + + urlStr := scheme + "://" + host + "/" + // Make URL only if bucketName is available, otherwise use the + // endpoint URL. + if bucketName != "" { + // If endpoint supports virtual host style use that always. + // Currently only S3 and Google Cloud Storage would support + // virtual host style. + if isVirtualHostStyle { + urlStr = scheme + "://" + bucketName + "." + host + "/" + if objectName != "" { + urlStr = urlStr + s3utils.EncodePath(objectName) + } + } else { + // If not fall back to using path style. + urlStr = urlStr + bucketName + "/" + if objectName != "" { + urlStr = urlStr + s3utils.EncodePath(objectName) + } + } + } + + // If there are any query values, add them to the end. + if len(queryValues) > 0 { + urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues) + } + + return url.Parse(urlStr) +} + +// returns true if virtual hosted style requests are to be used. +func (c *Client) isVirtualHostStyleRequest(url url.URL, bucketName string) bool { + if bucketName == "" { + return false + } + + if c.lookup == BucketLookupDNS { + return true + } + if c.lookup == BucketLookupPath { + return false + } + + // default to virtual only for Amazon/Google storage. In all other cases use + // path style requests + return s3utils.IsVirtualHostSupported(url, bucketName) +} diff --git a/vendor/github.com/minio/minio-go/appveyor.yml b/vendor/github.com/minio/minio-go/appveyor.yml new file mode 100644 index 0000000..48ea6e7 --- /dev/null +++ b/vendor/github.com/minio/minio-go/appveyor.yml @@ -0,0 +1,39 @@ +# version format +version: "{build}" + +# Operating system (build VM template) +os: Windows Server 2012 R2 + +clone_folder: c:\gopath\src\github.com\minio\minio-go + +# environment variables +environment: + GOPATH: c:\gopath + GO15VENDOREXPERIMENT: 1 + +# scripts that run after cloning repository +install: + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - go version + - go env + - go get -u golang.org/x/lint/golint + - go get -u github.com/remyoudompheng/go-misc/deadcode + - go get -u github.com/gordonklaus/ineffassign + - go get -u golang.org/x/crypto/argon2 + - go get -t ./... + +# to run your custom scripts instead of automatic MSBuild +build_script: + - go vet ./... + - gofmt -s -l . + - golint -set_exit_status github.com/minio/minio-go... + - deadcode + - ineffassign . + - go test -short -v + - go test -short -race -v + +# to disable automatic tests +test: off + +# to disable deployment +deploy: off diff --git a/vendor/github.com/minio/minio-go/bucket-cache.go b/vendor/github.com/minio/minio-go/bucket-cache.go new file mode 100644 index 0000000..cac7ad7 --- /dev/null +++ b/vendor/github.com/minio/minio-go/bucket-cache.go @@ -0,0 +1,221 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "net/http" + "net/url" + "path" + "sync" + + "github.com/minio/minio-go/pkg/credentials" + "github.com/minio/minio-go/pkg/s3signer" + "github.com/minio/minio-go/pkg/s3utils" +) + +// bucketLocationCache - Provides simple mechanism to hold bucket +// locations in memory. +type bucketLocationCache struct { + // mutex is used for handling the concurrent + // read/write requests for cache. + sync.RWMutex + + // items holds the cached bucket locations. + items map[string]string +} + +// newBucketLocationCache - Provides a new bucket location cache to be +// used internally with the client object. +func newBucketLocationCache() *bucketLocationCache { + return &bucketLocationCache{ + items: make(map[string]string), + } +} + +// Get - Returns a value of a given key if it exists. +func (r *bucketLocationCache) Get(bucketName string) (location string, ok bool) { + r.RLock() + defer r.RUnlock() + location, ok = r.items[bucketName] + return +} + +// Set - Will persist a value into cache. +func (r *bucketLocationCache) Set(bucketName string, location string) { + r.Lock() + defer r.Unlock() + r.items[bucketName] = location +} + +// Delete - Deletes a bucket name from cache. +func (r *bucketLocationCache) Delete(bucketName string) { + r.Lock() + defer r.Unlock() + delete(r.items, bucketName) +} + +// GetBucketLocation - get location for the bucket name from location cache, if not +// fetch freshly by making a new request. +func (c Client) GetBucketLocation(bucketName string) (string, error) { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return "", err + } + return c.getBucketLocation(bucketName) +} + +// getBucketLocation - Get location for the bucketName from location map cache, if not +// fetch freshly by making a new request. +func (c Client) getBucketLocation(bucketName string) (string, error) { + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return "", err + } + + // Region set then no need to fetch bucket location. + if c.region != "" { + return c.region, nil + } + + if location, ok := c.bucketLocCache.Get(bucketName); ok { + return location, nil + } + + // Initialize a new request. + req, err := c.getBucketLocationRequest(bucketName) + if err != nil { + return "", err + } + + // Initiate the request. + resp, err := c.do(req) + defer closeResponse(resp) + if err != nil { + return "", err + } + location, err := processBucketLocationResponse(resp, bucketName) + if err != nil { + return "", err + } + c.bucketLocCache.Set(bucketName, location) + return location, nil +} + +// processes the getBucketLocation http response from the server. +func processBucketLocationResponse(resp *http.Response, bucketName string) (bucketLocation string, err error) { + if resp != nil { + if resp.StatusCode != http.StatusOK { + err = httpRespToErrorResponse(resp, bucketName, "") + errResp := ToErrorResponse(err) + // For access denied error, it could be an anonymous + // request. Move forward and let the top level callers + // succeed if possible based on their policy. 
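+			// Fall back to us-east-1, the region S3 assumes when none
+			// can be determined.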
+ if errResp.Code == "AccessDenied" { + return "us-east-1", nil + } + return "", err + } + } + + // Extract location. + var locationConstraint string + err = xmlDecoder(resp.Body, &locationConstraint) + if err != nil { + return "", err + } + + location := locationConstraint + // Location is empty will be 'us-east-1'. + if location == "" { + location = "us-east-1" + } + + // Location can be 'EU' convert it to meaningful 'eu-west-1'. + if location == "EU" { + location = "eu-west-1" + } + + // Save the location into cache. + + // Return. + return location, nil +} + +// getBucketLocationRequest - Wrapper creates a new getBucketLocation request. +func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, error) { + // Set location query. + urlValues := make(url.Values) + urlValues.Set("location", "") + + // Set get bucket location always as path style. + targetURL := c.endpointURL + targetURL.Path = path.Join(bucketName, "") + "/" + targetURL.RawQuery = urlValues.Encode() + + // Get a new HTTP request for the method. + req, err := http.NewRequest("GET", targetURL.String(), nil) + if err != nil { + return nil, err + } + + // Set UserAgent for the request. + c.setUserAgent(req) + + // Get credentials from the configured credentials provider. + value, err := c.credsProvider.Get() + if err != nil { + return nil, err + } + + var ( + signerType = value.SignerType + accessKeyID = value.AccessKeyID + secretAccessKey = value.SecretAccessKey + sessionToken = value.SessionToken + ) + + // Custom signer set then override the behavior. + if c.overrideSignerType != credentials.SignatureDefault { + signerType = c.overrideSignerType + } + + // If signerType returned by credentials helper is anonymous, + // then do not sign regardless of signerType override. + if value.SignerType == credentials.SignatureAnonymous { + signerType = credentials.SignatureAnonymous + } + + if signerType.IsAnonymous() { + return req, nil + } + + if signerType.IsV2() { + // Get Bucket Location calls should be always path style + isVirtualHost := false + req = s3signer.SignV2(*req, accessKeyID, secretAccessKey, isVirtualHost) + return req, nil + } + + // Set sha256 sum for signature calculation only with signature version '4'. + contentSha256 := emptySHA256Hex + if c.secure { + contentSha256 = unsignedPayload + } + + req.Header.Set("X-Amz-Content-Sha256", contentSha256) + req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1") + return req, nil +} diff --git a/vendor/github.com/minio/minio-go/bucket-notification.go b/vendor/github.com/minio/minio-go/bucket-notification.go new file mode 100644 index 0000000..ea303dd --- /dev/null +++ b/vendor/github.com/minio/minio-go/bucket-notification.go @@ -0,0 +1,273 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package minio
+
+import (
+	"encoding/xml"
+
+	"github.com/minio/minio-go/pkg/set"
+)
+
+// NotificationEventType is an S3 notification event associated to the bucket notification configuration
+type NotificationEventType string
+
+// The role of each event type is described at:
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
+const (
+	ObjectCreatedAll                     NotificationEventType = "s3:ObjectCreated:*"
+	ObjectCreatedPut                                           = "s3:ObjectCreated:Put"
+	ObjectCreatedPost                                          = "s3:ObjectCreated:Post"
+	ObjectCreatedCopy                                          = "s3:ObjectCreated:Copy"
+	ObjectCreatedCompleteMultipartUpload                       = "s3:ObjectCreated:CompleteMultipartUpload"
+	ObjectAccessedGet                                          = "s3:ObjectAccessed:Get"
+	ObjectAccessedHead                                         = "s3:ObjectAccessed:Head"
+	ObjectAccessedAll                                          = "s3:ObjectAccessed:*"
+	ObjectRemovedAll                                           = "s3:ObjectRemoved:*"
+	ObjectRemovedDelete                                        = "s3:ObjectRemoved:Delete"
+	ObjectRemovedDeleteMarkerCreated                           = "s3:ObjectRemoved:DeleteMarkerCreated"
+	ObjectReducedRedundancyLostObject                          = "s3:ReducedRedundancyLostObject"
+)
+
+// FilterRule - child of S3Key, a tag in the notification xml which
+// carries suffix/prefix filters
+type FilterRule struct {
+	Name  string `xml:"Name"`
+	Value string `xml:"Value"`
+}
+
+// S3Key - child of Filter, a tag in the notification xml which
+// carries suffix/prefix filters
+type S3Key struct {
+	FilterRules []FilterRule `xml:"FilterRule,omitempty"`
+}
+
+// Filter - a tag in the notification xml structure which carries
+// suffix/prefix filters
+type Filter struct {
+	S3Key S3Key `xml:"S3Key,omitempty"`
+}
+
+// Arn - holds ARN information that will be sent to the web service.
+// ARNs are described at http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+type Arn struct {
+	Partition string
+	Service   string
+	Region    string
+	AccountID string
+	Resource  string
+}
+
+// NewArn creates new ARN based on the given partition, service, region, account id and resource
+func NewArn(partition, service, region, accountID, resource string) Arn {
+	return Arn{Partition: partition,
+		Service:   service,
+		Region:    region,
+		AccountID: accountID,
+		Resource:  resource}
+}
+
+// String returns the string format of the ARN
+func (arn Arn) String() string {
+	return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource
+}
+
+// NotificationConfig - represents one single notification configuration
+// such as topic, queue or lambda configuration.
+type NotificationConfig struct {
+	ID     string                  `xml:"Id,omitempty"`
+	Arn    Arn                     `xml:"-"`
+	Events []NotificationEventType `xml:"Event"`
+	Filter *Filter                 `xml:"Filter,omitempty"`
+}
+
+// NewNotificationConfig creates one notification config and sets the given ARN
+func NewNotificationConfig(arn Arn) NotificationConfig {
+	return NotificationConfig{Arn: arn, Filter: &Filter{}}
+}
+
+// AddEvents adds one or more events to the current notification config
+func (t *NotificationConfig) AddEvents(events ...NotificationEventType) {
+	t.Events = append(t.Events, events...)
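+
+	// A minimal configuration sketch (hypothetical account and topic):
+	//
+	//	arn := NewArn("aws", "sns", "us-east-1", "123456789012", "mytopic")
+	//	cfg := NewNotificationConfig(arn)
+	//	cfg.AddEvents(ObjectCreatedAll, ObjectRemovedAll)
+	//	cfg.AddFilterSuffix(".jpg")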
+} + +// AddFilterSuffix sets the suffix configuration to the current notification config +func (t *NotificationConfig) AddFilterSuffix(suffix string) { + if t.Filter == nil { + t.Filter = &Filter{} + } + newFilterRule := FilterRule{Name: "suffix", Value: suffix} + // Replace any suffix rule if existing and add to the list otherwise + for index := range t.Filter.S3Key.FilterRules { + if t.Filter.S3Key.FilterRules[index].Name == "suffix" { + t.Filter.S3Key.FilterRules[index] = newFilterRule + return + } + } + t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule) +} + +// AddFilterPrefix sets the prefix configuration to the current notification config +func (t *NotificationConfig) AddFilterPrefix(prefix string) { + if t.Filter == nil { + t.Filter = &Filter{} + } + newFilterRule := FilterRule{Name: "prefix", Value: prefix} + // Replace any prefix rule if existing and add to the list otherwise + for index := range t.Filter.S3Key.FilterRules { + if t.Filter.S3Key.FilterRules[index].Name == "prefix" { + t.Filter.S3Key.FilterRules[index] = newFilterRule + return + } + } + t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule) +} + +// TopicConfig carries one single topic notification configuration +type TopicConfig struct { + NotificationConfig + Topic string `xml:"Topic"` +} + +// QueueConfig carries one single queue notification configuration +type QueueConfig struct { + NotificationConfig + Queue string `xml:"Queue"` +} + +// LambdaConfig carries one single cloudfunction notification configuration +type LambdaConfig struct { + NotificationConfig + Lambda string `xml:"CloudFunction"` +} + +// BucketNotification - the struct that represents the whole XML to be sent to the web service +type BucketNotification struct { + XMLName xml.Name `xml:"NotificationConfiguration"` + LambdaConfigs []LambdaConfig `xml:"CloudFunctionConfiguration"` + TopicConfigs []TopicConfig `xml:"TopicConfiguration"` + QueueConfigs []QueueConfig `xml:"QueueConfiguration"` +} + +// AddTopic adds a given topic config to the general bucket notification config +func (b *BucketNotification) AddTopic(topicConfig NotificationConfig) bool { + newTopicConfig := TopicConfig{NotificationConfig: topicConfig, Topic: topicConfig.Arn.String()} + for _, n := range b.TopicConfigs { + // If new config matches existing one + if n.Topic == newTopicConfig.Arn.String() && newTopicConfig.Filter == n.Filter { + + existingConfig := set.NewStringSet() + for _, v := range n.Events { + existingConfig.Add(string(v)) + } + + newConfig := set.NewStringSet() + for _, v := range topicConfig.Events { + newConfig.Add(string(v)) + } + + if !newConfig.Intersection(existingConfig).IsEmpty() { + return false + } + } + } + b.TopicConfigs = append(b.TopicConfigs, newTopicConfig) + return true +} + +// AddQueue adds a given queue config to the general bucket notification config +func (b *BucketNotification) AddQueue(queueConfig NotificationConfig) bool { + newQueueConfig := QueueConfig{NotificationConfig: queueConfig, Queue: queueConfig.Arn.String()} + for _, n := range b.QueueConfigs { + if n.Queue == newQueueConfig.Arn.String() && newQueueConfig.Filter == n.Filter { + + existingConfig := set.NewStringSet() + for _, v := range n.Events { + existingConfig.Add(string(v)) + } + + newConfig := set.NewStringSet() + for _, v := range queueConfig.Events { + newConfig.Add(string(v)) + } + + if !newConfig.Intersection(existingConfig).IsEmpty() { + return false + } + } + } + b.QueueConfigs = append(b.QueueConfigs, 
newQueueConfig) + return true +} + +// AddLambda adds a given lambda config to the general bucket notification config +func (b *BucketNotification) AddLambda(lambdaConfig NotificationConfig) bool { + newLambdaConfig := LambdaConfig{NotificationConfig: lambdaConfig, Lambda: lambdaConfig.Arn.String()} + for _, n := range b.LambdaConfigs { + if n.Lambda == newLambdaConfig.Arn.String() && newLambdaConfig.Filter == n.Filter { + + existingConfig := set.NewStringSet() + for _, v := range n.Events { + existingConfig.Add(string(v)) + } + + newConfig := set.NewStringSet() + for _, v := range lambdaConfig.Events { + newConfig.Add(string(v)) + } + + if !newConfig.Intersection(existingConfig).IsEmpty() { + return false + } + } + } + b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig) + return true +} + +// RemoveTopicByArn removes all topic configurations that match the exact specified ARN +func (b *BucketNotification) RemoveTopicByArn(arn Arn) { + var topics []TopicConfig + for _, topic := range b.TopicConfigs { + if topic.Topic != arn.String() { + topics = append(topics, topic) + } + } + b.TopicConfigs = topics +} + +// RemoveQueueByArn removes all queue configurations that match the exact specified ARN +func (b *BucketNotification) RemoveQueueByArn(arn Arn) { + var queues []QueueConfig + for _, queue := range b.QueueConfigs { + if queue.Queue != arn.String() { + queues = append(queues, queue) + } + } + b.QueueConfigs = queues +} + +// RemoveLambdaByArn removes all lambda configurations that match the exact specified ARN +func (b *BucketNotification) RemoveLambdaByArn(arn Arn) { + var lambdas []LambdaConfig + for _, lambda := range b.LambdaConfigs { + if lambda.Lambda != arn.String() { + lambdas = append(lambdas, lambda) + } + } + b.LambdaConfigs = lambdas +} diff --git a/vendor/github.com/minio/minio-go/constants.go b/vendor/github.com/minio/minio-go/constants.go new file mode 100644 index 0000000..7377423 --- /dev/null +++ b/vendor/github.com/minio/minio-go/constants.go @@ -0,0 +1,62 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +/// Multipart upload defaults. + +// absMinPartSize - absolute minimum part size (5 MiB) below which +// a part in a multipart upload may not be uploaded. +const absMinPartSize = 1024 * 1024 * 5 + +// minPartSize - minimum part size 64MiB per object after which +// putObject behaves internally as multipart. +const minPartSize = 1024 * 1024 * 64 + +// maxPartsCount - maximum number of parts for a single multipart session. +const maxPartsCount = 10000 + +// maxPartSize - maximum part size 5GiB for a single multipart upload +// operation. +const maxPartSize = 1024 * 1024 * 1024 * 5 + +// maxSinglePutObjectSize - maximum size 5GiB of object per PUT +// operation. +const maxSinglePutObjectSize = 1024 * 1024 * 1024 * 5 + +// maxMultipartPutObjectSize - maximum size 5TiB of object for +// Multipart operation. 
+const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5 + +// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when +// we don't want to sign the request payload +const unsignedPayload = "UNSIGNED-PAYLOAD" + +// Total number of parallel workers used for multipart operation. +const totalWorkers = 4 + +// Signature related constants. +const ( + signV4Algorithm = "AWS4-HMAC-SHA256" + iso8601DateFormat = "20060102T150405Z" +) + +// Storage class header constant. +const amzStorageClass = "X-Amz-Storage-Class" + +// Website redirect location header constant +const amzWebsiteRedirectLocation = "X-Amz-Website-Redirect-Location" diff --git a/vendor/github.com/minio/minio-go/core.go b/vendor/github.com/minio/minio-go/core.go new file mode 100644 index 0000000..4d51363 --- /dev/null +++ b/vendor/github.com/minio/minio-go/core.go @@ -0,0 +1,153 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "io" + "strings" + + "github.com/minio/minio-go/pkg/encrypt" +) + +// Core - Inherits Client and adds new methods to expose the low level S3 APIs. +type Core struct { + *Client +} + +// NewCore - Returns new initialized a Core client, this CoreClient should be +// only used under special conditions such as need to access lower primitives +// and being able to use them to write your own wrappers. +func NewCore(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Core, error) { + var s3Client Core + client, err := NewV4(endpoint, accessKeyID, secretAccessKey, secure) + if err != nil { + return nil, err + } + s3Client.Client = client + return &s3Client, nil +} + +// ListObjects - List all the objects at a prefix, optionally with marker and delimiter +// you can further filter the results. +func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) { + return c.listObjectsQuery(bucket, prefix, marker, delimiter, maxKeys) +} + +// ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses +// continuationToken instead of marker to support iteration over the results. +func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int, startAfter string) (ListBucketV2Result, error) { + return c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, maxkeys, startAfter) +} + +// CopyObject - copies an object from source object to destination object on server side. +func (c Core) CopyObject(sourceBucket, sourceObject, destBucket, destObject string, metadata map[string]string) (ObjectInfo, error) { + return c.copyObjectDo(context.Background(), sourceBucket, sourceObject, destBucket, destObject, metadata) +} + +// CopyObjectPart - creates a part in a multipart upload by copying (a +// part of) an existing object. 
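+//
+// Illustrative sketch (editorial addition, not upstream text): copying the
+// first 5 MiB of an existing object into part 1 of an ongoing multipart
+// upload. "core" is a hypothetical *Core client and "uploadID" a placeholder
+// from a prior NewMultipartUpload call.
+//
+//	part, err := core.CopyObjectPart("src-bucket", "src.bin",
+//		"dst-bucket", "dst.bin", uploadID, 1, 0, 5*1024*1024, nil)
+//	if err != nil {
+//		// handle error
+//	}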
+func (c Core) CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string,
+	partID int, startOffset, length int64, metadata map[string]string) (p CompletePart, err error) {
+
+	return c.copyObjectPartDo(context.Background(), srcBucket, srcObject, destBucket, destObject, uploadID,
+		partID, startOffset, length, metadata)
+}
+
+// PutObject - Upload object. Uploads using single PUT call.
+func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, metadata map[string]string, sse encrypt.ServerSide) (ObjectInfo, error) {
+	opts := PutObjectOptions{}
+	m := make(map[string]string)
+	for k, v := range metadata {
+		if strings.ToLower(k) == "content-encoding" {
+			opts.ContentEncoding = v
+		} else if strings.ToLower(k) == "content-disposition" {
+			opts.ContentDisposition = v
+		} else if strings.ToLower(k) == "content-language" {
+			opts.ContentLanguage = v
+		} else if strings.ToLower(k) == "content-type" {
+			opts.ContentType = v
+		} else if strings.ToLower(k) == "cache-control" {
+			opts.CacheControl = v
+		} else if strings.ToLower(k) == strings.ToLower(amzWebsiteRedirectLocation) {
+			opts.WebsiteRedirectLocation = v
+		} else {
+			m[k] = metadata[k]
+		}
+	}
+	opts.UserMetadata = m
+	opts.ServerSideEncryption = sse
+	return c.putObjectDo(context.Background(), bucket, object, data, md5Base64, sha256Hex, size, opts)
+}
+
+// NewMultipartUpload - Initiates new multipart upload and returns the new uploadID.
+func (c Core) NewMultipartUpload(bucket, object string, opts PutObjectOptions) (uploadID string, err error) {
+	result, err := c.initiateMultipartUpload(context.Background(), bucket, object, opts)
+	return result.UploadID, err
+}
+
+// ListMultipartUploads - List incomplete uploads.
+func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartUploadsResult, err error) {
+	return c.listMultipartUploadsQuery(bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads)
+}
+
+// PutObjectPart - Upload an object part.
+func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string, sse encrypt.ServerSide) (ObjectPart, error) {
+	return c.uploadPart(context.Background(), bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, sse)
+}
+
+// ListObjectParts - List uploaded parts of an incomplete upload.
+func (c Core) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListObjectPartsResult, err error) {
+	return c.listObjectPartsQuery(bucket, object, uploadID, partNumberMarker, maxParts)
+}
+
+// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object.
+func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []CompletePart) (string, error) {
+	res, err := c.completeMultipartUpload(context.Background(), bucket, object, uploadID, completeMultipartUpload{
+		Parts: parts,
+	})
+	return res.ETag, err
+}
+
+// AbortMultipartUpload - Abort an incomplete upload.
+func (c Core) AbortMultipartUpload(bucket, object, uploadID string) error {
+	return c.abortMultipartUpload(context.Background(), bucket, object, uploadID)
+}
+
+// GetBucketPolicy - fetches bucket access policy for a given bucket.
+func (c Core) GetBucketPolicy(bucket string) (string, error) {
+	return c.getBucketPolicy(bucket)
+}
+
+// PutBucketPolicy - applies a new bucket access policy for a given bucket.
+func (c Core) PutBucketPolicy(bucket, bucketPolicy string) error { + return c.putBucketPolicy(bucket, bucketPolicy) +} + +// GetObject is a lower level API implemented to support reading +// partial objects and also downloading objects with special conditions +// matching etag, modtime etc. +func (c Core) GetObject(bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, error) { + return c.getObject(context.Background(), bucketName, objectName, opts) +} + +// StatObject is a lower level API implemented to support special +// conditions matching etag, modtime on a request. +func (c Core) StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) { + return c.statObject(context.Background(), bucketName, objectName, opts) +} diff --git a/vendor/github.com/minio/minio-go/hook-reader.go b/vendor/github.com/minio/minio-go/hook-reader.go new file mode 100644 index 0000000..8f32291 --- /dev/null +++ b/vendor/github.com/minio/minio-go/hook-reader.go @@ -0,0 +1,71 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import "io" + +// hookReader hooks additional reader in the source stream. It is +// useful for making progress bars. Second reader is appropriately +// notified about the exact number of bytes read from the primary +// source on each Read operation. +type hookReader struct { + source io.Reader + hook io.Reader +} + +// Seek implements io.Seeker. Seeks source first, and if necessary +// seeks hook if Seek method is appropriately found. +func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) { + // Verify for source has embedded Seeker, use it. + sourceSeeker, ok := hr.source.(io.Seeker) + if ok { + return sourceSeeker.Seek(offset, whence) + } + // Verify if hook has embedded Seeker, use it. + hookSeeker, ok := hr.hook.(io.Seeker) + if ok { + return hookSeeker.Seek(offset, whence) + } + return n, nil +} + +// Read implements io.Reader. Always reads from the source, the return +// value 'n' number of bytes are reported through the hook. Returns +// error for all non io.EOF conditions. +func (hr *hookReader) Read(b []byte) (n int, err error) { + n, err = hr.source.Read(b) + if err != nil && err != io.EOF { + return n, err + } + // Progress the hook with the total read bytes from the source. + if _, herr := hr.hook.Read(b[:n]); herr != nil { + if herr != io.EOF { + return n, herr + } + } + return n, err +} + +// newHook returns a io.ReadSeeker which implements hookReader that +// reports the data read from the source to the hook. 
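+//
+// Illustrative sketch (editorial addition): within this package, a byte
+// counter can be attached as the hook; "progressReader" is a hypothetical
+// io.Reader that tallies the lengths it is handed.
+//
+//	r := newHook(file, progressReader)
+//	n, err := io.Copy(ioutil.Discard, r) // progressReader sees every read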
+func newHook(source, hook io.Reader) io.Reader {
+	if hook == nil {
+		return source
+	}
+	return &hookReader{source, hook}
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/chain.go b/vendor/github.com/minio/minio-go/pkg/credentials/chain.go
new file mode 100644
index 0000000..e29826f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/chain.go
@@ -0,0 +1,89 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+// A Chain will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The Chain provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the
+// Providers in the list.
+//
+// If none of the Providers retrieve valid credentials Value, ChainProvider's
+// Retrieve() will return the no credentials value.
+//
+// If a Provider is found which returns valid credentials Value ChainProvider
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again after IsExpired() is true.
+//
+//     creds := credentials.NewChainCredentials(
+//         []credentials.Provider{
+//             &credentials.EnvAWS{},
+//             &credentials.EnvMinio{},
+//         })
+//
+//     // Usage of ChainCredentials.
+//     mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1")
+//     if err != nil {
+//          log.Fatalln(err)
+//     }
+//
+type Chain struct {
+	Providers []Provider
+	curr      Provider
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+	return New(&Chain{
+		Providers: append([]Provider{}, providers...),
+	})
+}
+
+// Retrieve returns the credentials value, returns no credentials (anonymous)
+// if no credentials provider returned any value.
+//
+// If a provider is found with credentials, it will be cached and any calls
+// to IsExpired() will return the expired state of the cached provider.
+func (c *Chain) Retrieve() (Value, error) {
+	for _, p := range c.Providers {
+		creds, _ := p.Retrieve()
+		// Always prioritize non-anonymous providers, if any.
+		if creds.AccessKeyID == "" && creds.SecretAccessKey == "" {
+			continue
+		}
+		c.curr = p
+		return creds, nil
+	}
+	// At this point we have exhausted all the providers and
+	// are left without any credentials, so return anonymous.
+	return Value{
+		SignerType: SignatureAnonymous,
+	}, nil
+}
+
+// IsExpired will return the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
+func (c *Chain) IsExpired() bool { + if c.curr != nil { + return c.curr.IsExpired() + } + + return true +} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/config.json.sample b/vendor/github.com/minio/minio-go/pkg/credentials/config.json.sample new file mode 100644 index 0000000..130746f --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/config.json.sample @@ -0,0 +1,17 @@ +{ + "version": "8", + "hosts": { + "play": { + "url": "https://play.minio.io:9000", + "accessKey": "Q3AM3UQ867SPQQA43P2F", + "secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", + "api": "S3v2" + }, + "s3": { + "url": "https://s3.amazonaws.com", + "accessKey": "accessKey", + "secretKey": "secret", + "api": "S3v4" + } + } +} \ No newline at end of file diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go new file mode 100644 index 0000000..4bfdad4 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.go @@ -0,0 +1,175 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "sync" + "time" +) + +// A Value is the AWS credentials value for individual credential fields. +type Value struct { + // AWS Access key ID + AccessKeyID string + + // AWS Secret Access Key + SecretAccessKey string + + // AWS Session Token + SessionToken string + + // Signature Type. + SignerType SignatureType +} + +// A Provider is the interface for any component which will provide credentials +// Value. A provider is required to manage its own Expired state, and what to +// be expired means. +type Provider interface { + // Retrieve returns nil if it successfully retrieved the value. + // Error is returned if the value were not obtainable, or empty. + Retrieve() (Value, error) + + // IsExpired returns if the credentials are no longer valid, and need + // to be retrieved. + IsExpired() bool +} + +// A Expiry provides shared expiration logic to be used by credentials +// providers to implement expiry functionality. +// +// The best method to use this struct is as an anonymous field within the +// provider's struct. +// +// Example: +// type IAMCredentialProvider struct { +// Expiry +// ... +// } +type Expiry struct { + // The date/time when to expire on + expiration time.Time + + // If set will be used by IsExpired to determine the current time. + // Defaults to time.Now if CurrentTime is not set. + CurrentTime func() time.Time +} + +// SetExpiration sets the expiration IsExpired will check when called. +// +// If window is greater than 0 the expiration time will be reduced by the +// window value. +// +// Using a window is helpful to trigger credentials to expire sooner than +// the expiration time given to ensure no requests are made with expired +// tokens. 
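+//
+// Illustrative sketch (editorial addition): expire ten seconds before the
+// token's real deadline so in-flight requests never carry a stale token.
+// "tokenExpiry" is a hypothetical time.Time obtained from the issuer.
+//
+//	e.SetExpiration(tokenExpiry, 10*time.Second)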
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { + e.expiration = expiration + if window > 0 { + e.expiration = e.expiration.Add(-window) + } +} + +// IsExpired returns if the credentials are expired. +func (e *Expiry) IsExpired() bool { + if e.CurrentTime == nil { + e.CurrentTime = time.Now + } + return e.expiration.Before(e.CurrentTime()) +} + +// Credentials - A container for synchronous safe retrieval of credentials Value. +// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. +// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. +type Credentials struct { + sync.Mutex + + creds Value + forceRefresh bool + provider Provider +} + +// New returns a pointer to a new Credentials with the provider set. +func New(provider Provider) *Credentials { + return &Credentials{ + provider: provider, + forceRefresh: true, + } +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + c.Lock() + defer c.Unlock() + + if c.isExpired() { + creds, err := c.provider.Retrieve() + if err != nil { + return Value{}, err + } + c.creds = creds + c.forceRefresh = false + } + + return c.creds, nil +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). +// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). +func (c *Credentials) Expire() { + c.Lock() + defer c.Unlock() + + c.forceRefresh = true +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be refreshed. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.Lock() + defer c.Unlock() + + return c.isExpired() +} + +// isExpired helper method wrapping the definition of expired credentials. 
+func (c *Credentials) isExpired() bool { + return c.forceRefresh || c.provider.IsExpired() +} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/credentials.sample b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.sample new file mode 100644 index 0000000..7fc91d9 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/credentials.sample @@ -0,0 +1,12 @@ +[default] +aws_access_key_id = accessKey +aws_secret_access_key = secret +aws_session_token = token + +[no_token] +aws_access_key_id = accessKey +aws_secret_access_key = secret + +[with_colon] +aws_access_key_id: accessKey +aws_secret_access_key: secret diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/doc.go b/vendor/github.com/minio/minio-go/pkg/credentials/doc.go new file mode 100644 index 0000000..c48784b --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/doc.go @@ -0,0 +1,62 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package credentials provides credential retrieval and management +// for S3 compatible object storage. +// +// By default the Credentials.Get() will cache the successful result of a +// Provider's Retrieve() until Provider.IsExpired() returns true. At which +// point Credentials will call Provider's Retrieve() to get new credential Value. +// +// The Provider is responsible for determining when credentials have expired. +// It is also important to note that Credentials will always call Retrieve the +// first time Credentials.Get() is called. +// +// Example of using the environment variable credentials. +// +// creds := NewFromEnv() +// // Retrieve the credentials value +// credValue, err := creds.Get() +// if err != nil { +// // handle error +// } +// +// Example of forcing credentials to expire and be refreshed on the next Get(). +// This may be helpful to proactively expire credentials and refresh them sooner +// than they would naturally expire on their own. +// +// creds := NewFromIAM("") +// creds.Expire() +// credsValue, err := creds.Get() +// // New credentials will be retrieved instead of from cache. +// +// +// Custom Provider +// +// Each Provider built into this package also provides a helper method to generate +// a Credentials pointer setup with the provider. To use a custom Provider just +// create a type which satisfies the Provider interface and pass it to the +// NewCredentials method. 
+//
+//     type MyProvider struct{}
+//     func (m *MyProvider) Retrieve() (Value, error) {...}
+//     func (m *MyProvider) IsExpired() bool {...}
+//
+//     creds := NewCredentials(&MyProvider{})
+//     credValue, err := creds.Get()
+//
+package credentials
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go b/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go
new file mode 100644
index 0000000..f9b2cc3
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/env_aws.go
@@ -0,0 +1,71 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "os"
+
+// An EnvAWS retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY.
+// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY.
+// * Secret Token: AWS_SESSION_TOKEN.
+type EnvAWS struct {
+	retrieved bool
+}
+
+// NewEnvAWS returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvAWS() *Credentials {
+	return New(&EnvAWS{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvAWS) Retrieve() (Value, error) {
+	e.retrieved = false
+
+	id := os.Getenv("AWS_ACCESS_KEY_ID")
+	if id == "" {
+		id = os.Getenv("AWS_ACCESS_KEY")
+	}
+
+	secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
+	if secret == "" {
+		secret = os.Getenv("AWS_SECRET_KEY")
+	}
+
+	signerType := SignatureV4
+	if id == "" || secret == "" {
+		signerType = SignatureAnonymous
+	}
+
+	e.retrieved = true
+	return Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    os.Getenv("AWS_SESSION_TOKEN"),
+		SignerType:      signerType,
+	}, nil
+}
+
+// IsExpired returns if the credentials have been retrieved.
+func (e *EnvAWS) IsExpired() bool {
+	return !e.retrieved
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go b/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go
new file mode 100644
index 0000000..d72e771
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/env_minio.go
@@ -0,0 +1,62 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import "os"
+
+// An EnvMinio retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: MINIO_ACCESS_KEY.
+// * Secret Access Key: MINIO_SECRET_KEY.
+type EnvMinio struct {
+	retrieved bool
+}
+
+// NewEnvMinio returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvMinio() *Credentials {
+	return New(&EnvMinio{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvMinio) Retrieve() (Value, error) {
+	e.retrieved = false
+
+	id := os.Getenv("MINIO_ACCESS_KEY")
+	secret := os.Getenv("MINIO_SECRET_KEY")
+
+	signerType := SignatureV4
+	if id == "" || secret == "" {
+		signerType = SignatureAnonymous
+	}
+
+	e.retrieved = true
+	return Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SignerType:      signerType,
+	}, nil
+}
+
+// IsExpired returns if the credentials have been retrieved.
+func (e *EnvMinio) IsExpired() bool {
+	return !e.retrieved
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go b/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go
new file mode 100644
index 0000000..5ad6830
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/file_aws_credentials.go
@@ -0,0 +1,120 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+	"os"
+	"path/filepath"
+
+	"github.com/go-ini/ini"
+	homedir "github.com/mitchellh/go-homedir"
+)
+
+// A FileAWSCredentials retrieves credentials from the current user's home
+// directory, and keeps track if those credentials are expired.
+//
+// Profile ini file example: $HOME/.aws/credentials
+type FileAWSCredentials struct {
+	// Path to the shared credentials file.
+	//
+	// If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
+	// env value is empty will default to current user's home directory.
+	// Linux/OSX: "$HOME/.aws/credentials"
+	// Windows:   "%USERPROFILE%\.aws\credentials"
+	filename string
+
+	// AWS Profile to extract credentials from the shared credentials file. If empty
+	// will default to environment variable "AWS_PROFILE" or "default" if
+	// environment variable is also not set.
+	profile string
+
+	// retrieved states if the credentials have been successfully retrieved.
+	retrieved bool
+}
+
+// NewFileAWSCredentials returns a pointer to a new Credentials object
+// wrapping the Profile file provider.
+func NewFileAWSCredentials(filename string, profile string) *Credentials {
+	return New(&FileAWSCredentials{
+		filename: filename,
+		profile:  profile,
+	})
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// user's home directory.
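+//
+// Illustrative sketch (editorial addition): passing empty strings falls
+// back to $HOME/.aws/credentials and the "default" profile, per the struct
+// documentation above.
+//
+//	creds := NewFileAWSCredentials("", "")
+//	v, err := creds.Get()
+//	if err != nil {
+//		// handle error
+//	}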
+func (p *FileAWSCredentials) Retrieve() (Value, error) { + if p.filename == "" { + p.filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE") + if p.filename == "" { + homeDir, err := homedir.Dir() + if err != nil { + return Value{}, err + } + p.filename = filepath.Join(homeDir, ".aws", "credentials") + } + } + if p.profile == "" { + p.profile = os.Getenv("AWS_PROFILE") + if p.profile == "" { + p.profile = "default" + } + } + + p.retrieved = false + + iniProfile, err := loadProfile(p.filename, p.profile) + if err != nil { + return Value{}, err + } + + // Default to empty string if not found. + id := iniProfile.Key("aws_access_key_id") + // Default to empty string if not found. + secret := iniProfile.Key("aws_secret_access_key") + // Default to empty string if not found. + token := iniProfile.Key("aws_session_token") + + p.retrieved = true + return Value{ + AccessKeyID: id.String(), + SecretAccessKey: secret.String(), + SessionToken: token.String(), + SignerType: SignatureV4, + }, nil +} + +// IsExpired returns if the shared credentials have expired. +func (p *FileAWSCredentials) IsExpired() bool { + return !p.retrieved +} + +// loadProfiles loads from the file pointed to by shared credentials filename for profile. +// The credentials retrieved from the profile will be returned or error. Error will be +// returned if it fails to read from the file, or the data is invalid. +func loadProfile(filename, profile string) (*ini.Section, error) { + config, err := ini.Load(filename) + if err != nil { + return nil, err + } + iniProfile, err := config.GetSection(profile) + if err != nil { + return nil, err + } + return iniProfile, nil +} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go b/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go new file mode 100644 index 0000000..6a6827e --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/file_minio_client.go @@ -0,0 +1,133 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "runtime" + + homedir "github.com/mitchellh/go-homedir" +) + +// A FileMinioClient retrieves credentials from the current user's home +// directory, and keeps track if those credentials are expired. +// +// Configuration file example: $HOME/.mc/config.json +type FileMinioClient struct { + // Path to the shared credentials file. + // + // If empty will look for "MINIO_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.mc/config.json" + // Windows: "%USERALIAS%\mc\config.json" + filename string + + // Minio Alias to extract credentials from the shared credentials file. If empty + // will default to environment variable "MINIO_ALIAS" or "default" if + // environment variable is also not set. 
+ alias string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewFileMinioClient returns a pointer to a new Credentials object +// wrapping the Alias file provider. +func NewFileMinioClient(filename string, alias string) *Credentials { + return New(&FileMinioClient{ + filename: filename, + alias: alias, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. +func (p *FileMinioClient) Retrieve() (Value, error) { + if p.filename == "" { + if value, ok := os.LookupEnv("MINIO_SHARED_CREDENTIALS_FILE"); ok { + p.filename = value + } else { + homeDir, err := homedir.Dir() + if err != nil { + return Value{}, err + } + p.filename = filepath.Join(homeDir, ".mc", "config.json") + if runtime.GOOS == "windows" { + p.filename = filepath.Join(homeDir, "mc", "config.json") + } + } + } + + if p.alias == "" { + p.alias = os.Getenv("MINIO_ALIAS") + if p.alias == "" { + p.alias = "s3" + } + } + + p.retrieved = false + + hostCfg, err := loadAlias(p.filename, p.alias) + if err != nil { + return Value{}, err + } + + p.retrieved = true + return Value{ + AccessKeyID: hostCfg.AccessKey, + SecretAccessKey: hostCfg.SecretKey, + SignerType: parseSignatureType(hostCfg.API), + }, nil +} + +// IsExpired returns if the shared credentials have expired. +func (p *FileMinioClient) IsExpired() bool { + return !p.retrieved +} + +// hostConfig configuration of a host. +type hostConfig struct { + URL string `json:"url"` + AccessKey string `json:"accessKey"` + SecretKey string `json:"secretKey"` + API string `json:"api"` +} + +// config config version. +type config struct { + Version string `json:"version"` + Hosts map[string]hostConfig `json:"hosts"` +} + +// loadAliass loads from the file pointed to by shared credentials filename for alias. +// The credentials retrieved from the alias will be returned or error. Error will be +// returned if it fails to read from the file. +func loadAlias(filename, alias string) (hostConfig, error) { + cfg := &config{} + configBytes, err := ioutil.ReadFile(filename) + if err != nil { + return hostConfig{}, err + } + if err = json.Unmarshal(configBytes, cfg); err != nil { + return hostConfig{}, err + } + return cfg.Hosts[alias], nil +} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go new file mode 100644 index 0000000..05b2a8b --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/iam_aws.go @@ -0,0 +1,250 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "bufio" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "os" + "path" + "time" +) + +// DefaultExpiryWindow - Default expiry window. +// ExpiryWindow will allow the credentials to trigger refreshing +// prior to the credentials actually expiring. 
This is beneficial
+// so race conditions with expiring credentials do not cause
+// requests to fail unexpectedly due to ExpiredTokenException exceptions.
+const DefaultExpiryWindow = time.Second * 10 // 10 secs
+
+// An IAM retrieves credentials from the EC2 service, and keeps track if
+// those credentials are expired.
+type IAM struct {
+	Expiry
+
+	// Required http Client to use when connecting to IAM metadata service.
+	Client *http.Client
+
+	// Custom endpoint to fetch IAM role credentials.
+	endpoint string
+}
+
+// IAM Roles for Amazon EC2
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+const (
+	defaultIAMRoleEndpoint      = "http://169.254.169.254"
+	defaultECSRoleEndpoint      = "http://169.254.170.2"
+	defaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials"
+)
+
+// https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html
+func getEndpoint(endpoint string) (string, bool) {
+	if endpoint != "" {
+		return endpoint, os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI") != ""
+	}
+	if ecsURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"); ecsURI != "" {
+		return fmt.Sprintf("%s%s", defaultECSRoleEndpoint, ecsURI), true
+	}
+	return defaultIAMRoleEndpoint, false
+}
+
+// NewIAM returns a pointer to a new Credentials object wrapping the IAM.
+func NewIAM(endpoint string) *Credentials {
+	p := &IAM{
+		Client: &http.Client{
+			Transport: http.DefaultTransport,
+		},
+		endpoint: endpoint,
+	}
+	return New(p)
+}
+
+// Retrieve retrieves credentials from the EC2 service.
+// Error will be returned if the request fails, or unable to extract
+// the desired credentials.
+func (m *IAM) Retrieve() (Value, error) {
+	endpoint, isEcsTask := getEndpoint(m.endpoint)
+	var roleCreds ec2RoleCredRespBody
+	var err error
+	if isEcsTask {
+		roleCreds, err = getEcsTaskCredentials(m.Client, endpoint)
+	} else {
+		roleCreds, err = getCredentials(m.Client, endpoint)
+	}
+	if err != nil {
+		return Value{}, err
+	}
+	// Expiry window is set to 10secs.
+	m.SetExpiration(roleCreds.Expiration, DefaultExpiryWindow)
+
+	return Value{
+		AccessKeyID:     roleCreds.AccessKeyID,
+		SecretAccessKey: roleCreds.SecretAccessKey,
+		SessionToken:    roleCreds.Token,
+		SignerType:      SignatureV4,
+	}, nil
+}
+
+// An ec2RoleCredRespBody provides the shape for unmarshaling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+	// Success State
+	Expiration      time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+
+	// Error state
+	Code    string
+	Message string
+
+	// Unused params.
+	LastUpdated time.Time
+	Type        string
+}
+
+// Get the final IAM role URL where the request will
+// be sent to fetch the rolling access credentials.
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
+func getIAMRoleURL(endpoint string) (*url.URL, error) {
+	u, err := url.Parse(endpoint)
+	if err != nil {
+		return nil, err
+	}
+	u.Path = defaultIAMSecurityCredsPath
+	return u, nil
+}
+
+// listRoleNames lists the credential role names associated
+// with the current EC2 service. An error is returned if there
+// are no credentials, or if making or receiving the request fails.
+// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html +func listRoleNames(client *http.Client, u *url.URL) ([]string, error) { + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, errors.New(resp.Status) + } + + credsList := []string{} + s := bufio.NewScanner(resp.Body) + for s.Scan() { + credsList = append(credsList, s.Text()) + } + + if err := s.Err(); err != nil { + return nil, err + } + + return credsList, nil +} + +func getEcsTaskCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) { + req, err := http.NewRequest("GET", endpoint, nil) + if err != nil { + return ec2RoleCredRespBody{}, err + } + + resp, err := client.Do(req) + if err != nil { + return ec2RoleCredRespBody{}, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return ec2RoleCredRespBody{}, errors.New(resp.Status) + } + + respCreds := ec2RoleCredRespBody{} + if err := json.NewDecoder(resp.Body).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, err + } + + return respCreds, nil +} + +// getCredentials - obtains the credentials from the IAM role name associated with +// the current EC2 service. +// +// If the credentials cannot be found, or there is an error +// reading the response an error will be returned. +func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) { + + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + u, err := getIAMRoleURL(endpoint) + if err != nil { + return ec2RoleCredRespBody{}, err + } + + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + roleNames, err := listRoleNames(client, u) + if err != nil { + return ec2RoleCredRespBody{}, err + } + + if len(roleNames) == 0 { + return ec2RoleCredRespBody{}, errors.New("No IAM roles attached to this EC2 service") + } + + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + // - An instance profile can contain only one IAM role. This limit cannot be increased. + roleName := roleNames[0] + + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html + // The following command retrieves the security credentials for an + // IAM role named `s3access`. + // + // $ curl http://169.254.169.254/latest/meta-data/iam/security-credentials/s3access + // + u.Path = path.Join(u.Path, roleName) + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return ec2RoleCredRespBody{}, err + } + + resp, err := client.Do(req) + if err != nil { + return ec2RoleCredRespBody{}, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return ec2RoleCredRespBody{}, errors.New(resp.Status) + } + + respCreds := ec2RoleCredRespBody{} + if err := json.NewDecoder(resp.Body).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, err + } + + if respCreds.Code != "Success" { + // If an error code was returned something failed requesting the role. 
+ return ec2RoleCredRespBody{}, errors.New(respCreds.Message) + } + + return respCreds, nil +} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go b/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go new file mode 100644 index 0000000..1b768e8 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/signature-type.go @@ -0,0 +1,77 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import "strings" + +// SignatureType is type of Authorization requested for a given HTTP request. +type SignatureType int + +// Different types of supported signatures - default is SignatureV4 or SignatureDefault. +const ( + // SignatureDefault is always set to v4. + SignatureDefault SignatureType = iota + SignatureV4 + SignatureV2 + SignatureV4Streaming + SignatureAnonymous // Anonymous signature signifies, no signature. +) + +// IsV2 - is signature SignatureV2? +func (s SignatureType) IsV2() bool { + return s == SignatureV2 +} + +// IsV4 - is signature SignatureV4? +func (s SignatureType) IsV4() bool { + return s == SignatureV4 || s == SignatureDefault +} + +// IsStreamingV4 - is signature SignatureV4Streaming? +func (s SignatureType) IsStreamingV4() bool { + return s == SignatureV4Streaming +} + +// IsAnonymous - is signature empty? +func (s SignatureType) IsAnonymous() bool { + return s == SignatureAnonymous +} + +// Stringer humanized version of signature type, +// strings returned here are case insensitive. +func (s SignatureType) String() string { + if s.IsV2() { + return "S3v2" + } else if s.IsV4() { + return "S3v4" + } else if s.IsStreamingV4() { + return "S3v4Streaming" + } + return "Anonymous" +} + +func parseSignatureType(str string) SignatureType { + if strings.EqualFold(str, "S3v4") { + return SignatureV4 + } else if strings.EqualFold(str, "S3v2") { + return SignatureV2 + } else if strings.EqualFold(str, "S3v4Streaming") { + return SignatureV4Streaming + } + return SignatureAnonymous +} diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/static.go b/vendor/github.com/minio/minio-go/pkg/credentials/static.go new file mode 100644 index 0000000..8b0ba71 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/credentials/static.go @@ -0,0 +1,67 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package credentials
+
+// A Static is a set of credentials which are set programmatically,
+// and will never expire.
+type Static struct {
+	Value
+}
+
+// NewStaticV2 returns a pointer to a new Credentials object
+// wrapping a static credentials value provider, with the signature
+// set to v2. If access and secret are not specified then, regardless
+// of the signature type set, Value will return as anonymous.
+func NewStaticV2(id, secret, token string) *Credentials {
+	return NewStatic(id, secret, token, SignatureV2)
+}
+
+// NewStaticV4 is similar to NewStaticV2, except the signature is set to v4.
+func NewStaticV4(id, secret, token string) *Credentials {
+	return NewStatic(id, secret, token, SignatureV4)
+}
+
+// NewStatic returns a pointer to a new Credentials object
+// wrapping a static credentials value provider.
+func NewStatic(id, secret, token string, signerType SignatureType) *Credentials {
+	return New(&Static{
+		Value: Value{
+			AccessKeyID:     id,
+			SecretAccessKey: secret,
+			SessionToken:    token,
+			SignerType:      signerType,
+		},
+	})
+}
+
+// Retrieve returns the static credentials.
+func (s *Static) Retrieve() (Value, error) {
+	if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+		// Anonymous is not an error
+		return Value{SignerType: SignatureAnonymous}, nil
+	}
+	return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For Static, the credentials never expire.
+func (s *Static) IsExpired() bool {
+	return false
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/sts_client_grants.go b/vendor/github.com/minio/minio-go/pkg/credentials/sts_client_grants.go
new file mode 100644
index 0000000..f0a4e8d
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/sts_client_grants.go
@@ -0,0 +1,173 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2019 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+	"encoding/xml"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"time"
+)
+
+// AssumedRoleUser - The identifiers for the temporary security credentials that
+// the operation returns. Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser
+type AssumedRoleUser struct {
+	Arn           string
+	AssumedRoleID string `xml:"AssumeRoleId"`
+}
+
+// AssumeRoleWithClientGrantsResponse contains the result of a successful AssumeRoleWithClientGrants request.
+type AssumeRoleWithClientGrantsResponse struct {
+	XMLName          xml.Name           `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithClientGrantsResponse" json:"-"`
+	Result           ClientGrantsResult `xml:"AssumeRoleWithClientGrantsResult"`
+	ResponseMetadata struct {
+		RequestID string `xml:"RequestId,omitempty"`
+	} `xml:"ResponseMetadata,omitempty"`
+}
+
+// ClientGrantsResult - Contains the response to a successful AssumeRoleWithClientGrants
+// request, including temporary credentials that can be used to make Minio API requests.
+type ClientGrantsResult struct { + AssumedRoleUser AssumedRoleUser `xml:",omitempty"` + Audience string `xml:",omitempty"` + Credentials struct { + AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` + SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` + Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` + SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` + } `xml:",omitempty"` + PackedPolicySize int `xml:",omitempty"` + Provider string `xml:",omitempty"` + SubjectFromClientGrantsToken string `xml:",omitempty"` +} + +// ClientGrantsToken - client grants token with expiry. +type ClientGrantsToken struct { + token string + expiry int +} + +// Token - access token returned after authenticating client grants. +func (c *ClientGrantsToken) Token() string { + return c.token +} + +// Expiry - expiry for the access token returned after authenticating +// client grants. +func (c *ClientGrantsToken) Expiry() string { + return fmt.Sprintf("%d", c.expiry) +} + +// A STSClientGrants retrieves credentials from Minio service, and keeps track if +// those credentials are expired. +type STSClientGrants struct { + Expiry + + // Required http Client to use when connecting to Minio STS service. + Client *http.Client + + // Minio endpoint to fetch STS credentials. + stsEndpoint string + + // getClientGrantsTokenExpiry function to retrieve tokens + // from IDP This function should return two values one is + // accessToken which is a self contained access token (JWT) + // and second return value is the expiry associated with + // this token. This is a customer provided function and + // is mandatory. + getClientGrantsTokenExpiry func() (*ClientGrantsToken, error) +} + +// NewSTSClientGrants returns a pointer to a new +// Credentials object wrapping the STSClientGrants. 
+func NewSTSClientGrants(stsEndpoint string, getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (*Credentials, error) {
+ if stsEndpoint == "" {
+ return nil, errors.New("STS endpoint cannot be empty")
+ }
+ if getClientGrantsTokenExpiry == nil {
+ return nil, errors.New("Client grants access token and expiry retrieval function should be defined")
+ }
+ return New(&STSClientGrants{
+ Client: &http.Client{
+ Transport: http.DefaultTransport,
+ },
+ stsEndpoint: stsEndpoint,
+ getClientGrantsTokenExpiry: getClientGrantsTokenExpiry,
+ }), nil
+}
+
+func getClientGrantsCredentials(clnt *http.Client, endpoint string,
+ getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (AssumeRoleWithClientGrantsResponse, error) {
+
+ accessToken, err := getClientGrantsTokenExpiry()
+ if err != nil {
+ return AssumeRoleWithClientGrantsResponse{}, err
+ }
+
+ v := url.Values{}
+ v.Set("Action", "AssumeRoleWithClientGrants")
+ v.Set("Token", accessToken.Token())
+ v.Set("DurationSeconds", accessToken.Expiry())
+ v.Set("Version", "2011-06-15")
+
+ u, err := url.Parse(endpoint)
+ if err != nil {
+ return AssumeRoleWithClientGrantsResponse{}, err
+ }
+ u.RawQuery = v.Encode()
+
+ req, err := http.NewRequest("POST", u.String(), nil)
+ if err != nil {
+ return AssumeRoleWithClientGrantsResponse{}, err
+ }
+ resp, err := clnt.Do(req)
+ if err != nil {
+ return AssumeRoleWithClientGrantsResponse{}, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ return AssumeRoleWithClientGrantsResponse{}, errors.New(resp.Status)
+ }
+
+ a := AssumeRoleWithClientGrantsResponse{}
+ if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil {
+ return AssumeRoleWithClientGrantsResponse{}, err
+ }
+ return a, nil
+}
+
+// Retrieve retrieves credentials from the Minio service.
+// Error will be returned if the request fails.
+func (m *STSClientGrants) Retrieve() (Value, error) {
+ a, err := getClientGrantsCredentials(m.Client, m.stsEndpoint, m.getClientGrantsTokenExpiry)
+ if err != nil {
+ return Value{}, err
+ }
+
+ // Expiry window is set to 10 secs.
+ m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow)
+
+ return Value{
+ AccessKeyID: a.Result.Credentials.AccessKey,
+ SecretAccessKey: a.Result.Credentials.SecretKey,
+ SessionToken: a.Result.Credentials.SessionToken,
+ SignerType: SignatureV4,
+ }, nil
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/credentials/sts_web_identity.go b/vendor/github.com/minio/minio-go/pkg/credentials/sts_web_identity.go
new file mode 100644
index 0000000..d924b16
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/credentials/sts_web_identity.go
@@ -0,0 +1,169 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2019 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "time"
+)
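A rough sketch of wiring up the client-grants provider above; the STS endpoint is a placeholder and fetchToken is a hypothetical stand-in for a real token exchange with an identity provider (the sketch also assumes the Get accessor from this credentials package):

package main

import (
	"fmt"
	"log"

	"github.com/minio/minio-go/pkg/credentials"
)

// fetchToken is a placeholder for a real IDP exchange; it must return a
// self-contained JWT access token plus the expiry associated with it.
func fetchToken() (*credentials.ClientGrantsToken, error) {
	return nil, fmt.Errorf("token exchange not implemented")
}

func main() {
	creds, err := credentials.NewSTSClientGrants("https://minio.example.com:9000", fetchToken)
	if err != nil {
		log.Fatalln(err)
	}

	// Get triggers Retrieve, which performs the AssumeRoleWithClientGrants call.
	v, err := creds.Get()
	if err != nil {
		log.Fatalln(err) // fails here until fetchToken is implemented
	}
	fmt.Println(v.AccessKeyID)
}

+// AssumeRoleWithWebIdentityResponse contains the result of successful AssumeRoleWithWebIdentity request.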
+type AssumeRoleWithWebIdentityResponse struct { + XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithWebIdentityResponse" json:"-"` + Result WebIdentityResult `xml:"AssumeRoleWithWebIdentityResult"` + ResponseMetadata struct { + RequestID string `xml:"RequestId,omitempty"` + } `xml:"ResponseMetadata,omitempty"` +} + +// WebIdentityResult - Contains the response to a successful AssumeRoleWithWebIdentity +// request, including temporary credentials that can be used to make Minio API requests. +type WebIdentityResult struct { + AssumedRoleUser AssumedRoleUser `xml:",omitempty"` + Audience string `xml:",omitempty"` + Credentials struct { + AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` + SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` + Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` + SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` + } `xml:",omitempty"` + PackedPolicySize int `xml:",omitempty"` + Provider string `xml:",omitempty"` + SubjectFromWebIdentityToken string `xml:",omitempty"` +} + +// WebIdentityToken - web identity token with expiry. +type WebIdentityToken struct { + token string + expiry int +} + +// Token - access token returned after authenticating web identity. +func (c *WebIdentityToken) Token() string { + return c.token +} + +// Expiry - expiry for the access token returned after authenticating +// web identity. +func (c *WebIdentityToken) Expiry() string { + return fmt.Sprintf("%d", c.expiry) +} + +// A STSWebIdentity retrieves credentials from Minio service, and keeps track if +// those credentials are expired. +type STSWebIdentity struct { + Expiry + + // Required http Client to use when connecting to Minio STS service. + Client *http.Client + + // Minio endpoint to fetch STS credentials. + stsEndpoint string + + // getWebIDTokenExpiry function which returns ID tokens + // from IDP. This function should return two values one + // is ID token which is a self contained ID token (JWT) + // and second return value is the expiry associated with + // this token. + // This is a customer provided function and is mandatory. + getWebIDTokenExpiry func() (*WebIdentityToken, error) +} + +// NewSTSWebIdentity returns a pointer to a new +// Credentials object wrapping the STSWebIdentity. 
+func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdentityToken, error)) (*Credentials, error) { + if stsEndpoint == "" { + return nil, errors.New("STS endpoint cannot be empty") + } + if getWebIDTokenExpiry == nil { + return nil, errors.New("Web ID token and expiry retrieval function should be defined") + } + return New(&STSWebIdentity{ + Client: &http.Client{ + Transport: http.DefaultTransport, + }, + stsEndpoint: stsEndpoint, + getWebIDTokenExpiry: getWebIDTokenExpiry, + }), nil +} + +func getWebIdentityCredentials(clnt *http.Client, endpoint string, + getWebIDTokenExpiry func() (*WebIdentityToken, error)) (AssumeRoleWithWebIdentityResponse, error) { + idToken, err := getWebIDTokenExpiry() + if err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + v := url.Values{} + v.Set("Action", "AssumeRoleWithWebIdentity") + v.Set("WebIdentityToken", idToken.Token()) + v.Set("DurationSeconds", idToken.Expiry()) + v.Set("Version", "2011-06-15") + + u, err := url.Parse(endpoint) + if err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + u.RawQuery = v.Encode() + + req, err := http.NewRequest("POST", u.String(), nil) + if err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + resp, err := clnt.Do(req) + if err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return AssumeRoleWithWebIdentityResponse{}, errors.New(resp.Status) + } + + a := AssumeRoleWithWebIdentityResponse{} + if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil { + return AssumeRoleWithWebIdentityResponse{}, err + } + + return a, nil +} + +// Retrieve retrieves credentials from the Minio service. +// Error will be returned if the request fails. +func (m *STSWebIdentity) Retrieve() (Value, error) { + a, err := getWebIdentityCredentials(m.Client, m.stsEndpoint, m.getWebIDTokenExpiry) + if err != nil { + return Value{}, err + } + + // Expiry window is set to 10secs. + m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow) + + return Value{ + AccessKeyID: a.Result.Credentials.AccessKey, + SecretAccessKey: a.Result.Credentials.SecretKey, + SessionToken: a.Result.Credentials.SessionToken, + SignerType: SignatureV4, + }, nil +} diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/server-side.go b/vendor/github.com/minio/minio-go/pkg/encrypt/server-side.go new file mode 100644 index 0000000..2d3c70f --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/encrypt/server-side.go @@ -0,0 +1,195 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package encrypt + +import ( + "crypto/md5" + "encoding/base64" + "encoding/json" + "errors" + "net/http" + + "golang.org/x/crypto/argon2" +) + +const ( + // sseGenericHeader is the AWS SSE header used for SSE-S3 and SSE-KMS. 
+ sseGenericHeader = "X-Amz-Server-Side-Encryption"
+
+ // sseKmsKeyID is the AWS SSE-KMS key id.
+ sseKmsKeyID = sseGenericHeader + "-Aws-Kms-Key-Id"
+ // sseEncryptionContext is the AWS SSE-KMS Encryption Context data.
+ sseEncryptionContext = sseGenericHeader + "-Encryption-Context"
+
+ // sseCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key.
+ sseCustomerAlgorithm = sseGenericHeader + "-Customer-Algorithm"
+ // sseCustomerKey is the AWS SSE-C encryption key HTTP header key.
+ sseCustomerKey = sseGenericHeader + "-Customer-Key"
+ // sseCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key.
+ sseCustomerKeyMD5 = sseGenericHeader + "-Customer-Key-MD5"
+
+ // sseCopyCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key for CopyObject API.
+ sseCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm"
+ // sseCopyCustomerKey is the AWS SSE-C encryption key HTTP header key for CopyObject API.
+ sseCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key"
+ // sseCopyCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key for CopyObject API.
+ sseCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5"
+)
+
+// PBKDF creates a SSE-C key from the provided password and salt.
+// PBKDF is a password-based key derivation function
+// which can be used to derive a high-entropy cryptographic
+// key from a low-entropy password and a salt.
+type PBKDF func(password, salt []byte) ServerSide
+
+// DefaultPBKDF is the default PBKDF. It uses Argon2id with the
+// recommended parameters from the RFC draft (1 pass, 64 MB memory, 4 threads).
+var DefaultPBKDF PBKDF = func(password, salt []byte) ServerSide {
+ sse := ssec{}
+ copy(sse[:], argon2.IDKey(password, salt, 1, 64*1024, 4, 32))
+ return sse
+}
+
+// Type is the server-side-encryption method. It represents one of
+// the following encryption methods:
+// - SSE-C: server-side-encryption with customer-provided keys
+// - KMS: server-side-encryption with managed keys
+// - S3: server-side-encryption using S3 storage encryption
+type Type string
+
+const (
+ // SSEC represents server-side-encryption with customer-provided keys
+ SSEC Type = "SSE-C"
+ // KMS represents server-side-encryption with managed keys
+ KMS Type = "KMS"
+ // S3 represents server-side-encryption using S3 storage encryption
+ S3 Type = "S3"
+)
+
+// ServerSide is a form of S3 server-side-encryption.
+type ServerSide interface {
+ // Type returns the server-side-encryption method.
+ Type() Type
+
+ // Marshal adds encryption headers to the provided HTTP headers.
+ // It marks an HTTP request as server-side-encryption request
+ // and inserts the required data into the headers.
+ Marshal(h http.Header)
+}
+
+// NewSSE returns a server-side-encryption using S3 storage encryption.
+// Using SSE-S3 the server will encrypt the object with server-managed keys.
+func NewSSE() ServerSide { return s3{} }
+
+// NewSSEKMS returns a new server-side-encryption using SSE-KMS and the provided Key Id and context.
+func NewSSEKMS(keyID string, context interface{}) (ServerSide, error) {
+ if context == nil {
+ return kms{key: keyID, hasContext: false}, nil
+ }
+ serializedContext, err := json.Marshal(context)
+ if err != nil {
+ return nil, err
+ }
+ return kms{key: keyID, context: serializedContext, hasContext: true}, nil
+}
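As a usage sketch for the SSE-C helpers above (password and salt are placeholders; a real salt should be unique per object):

package main

import (
	"fmt"
	"net/http"

	"github.com/minio/minio-go/pkg/encrypt"
)

func main() {
	// Derive a 256-bit SSE-C key from a password/salt pair via Argon2id.
	sse := encrypt.DefaultPBKDF([]byte("my secret password"), []byte("object-unique-salt"))

	// Marshal inserts the SSE-C algorithm, key, and key-MD5 headers.
	h := make(http.Header)
	sse.Marshal(h)
	fmt.Println(h.Get("X-Amz-Server-Side-Encryption-Customer-Algorithm")) // AES256
}

+// NewSSEC returns a new server-side-encryption using SSE-C and the provided key.
+// The key must be 32 bytes long.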
+func NewSSEC(key []byte) (ServerSide, error) { + if len(key) != 32 { + return nil, errors.New("encrypt: SSE-C key must be 256 bit long") + } + sse := ssec{} + copy(sse[:], key) + return sse, nil +} + +// SSE transforms a SSE-C copy encryption into a SSE-C encryption. +// It is the inverse of SSECopy(...). +// +// If the provided sse is no SSE-C copy encryption SSE returns +// sse unmodified. +func SSE(sse ServerSide) ServerSide { + if sse == nil || sse.Type() != SSEC { + return sse + } + if sse, ok := sse.(ssecCopy); ok { + return ssec(sse) + } + return sse +} + +// SSECopy transforms a SSE-C encryption into a SSE-C copy +// encryption. This is required for SSE-C key rotation or a SSE-C +// copy where the source and the destination should be encrypted. +// +// If the provided sse is no SSE-C encryption SSECopy returns +// sse unmodified. +func SSECopy(sse ServerSide) ServerSide { + if sse == nil || sse.Type() != SSEC { + return sse + } + if sse, ok := sse.(ssec); ok { + return ssecCopy(sse) + } + return sse +} + +type ssec [32]byte + +func (s ssec) Type() Type { return SSEC } + +func (s ssec) Marshal(h http.Header) { + keyMD5 := md5.Sum(s[:]) + h.Set(sseCustomerAlgorithm, "AES256") + h.Set(sseCustomerKey, base64.StdEncoding.EncodeToString(s[:])) + h.Set(sseCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:])) +} + +type ssecCopy [32]byte + +func (s ssecCopy) Type() Type { return SSEC } + +func (s ssecCopy) Marshal(h http.Header) { + keyMD5 := md5.Sum(s[:]) + h.Set(sseCopyCustomerAlgorithm, "AES256") + h.Set(sseCopyCustomerKey, base64.StdEncoding.EncodeToString(s[:])) + h.Set(sseCopyCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:])) +} + +type s3 struct{} + +func (s s3) Type() Type { return S3 } + +func (s s3) Marshal(h http.Header) { h.Set(sseGenericHeader, "AES256") } + +type kms struct { + key string + context []byte + hasContext bool +} + +func (s kms) Type() Type { return KMS } + +func (s kms) Marshal(h http.Header) { + h.Set(sseGenericHeader, "aws:kms") + h.Set(sseKmsKeyID, s.key) + if s.hasContext { + h.Set(sseEncryptionContext, base64.StdEncoding.EncodeToString(s.context)) + } +} diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go new file mode 100644 index 0000000..156a6d6 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go @@ -0,0 +1,306 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package s3signer + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/http" + "strconv" + "strings" + "time" +) + +// Reference for constants used below - +// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming +const ( + streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" + streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD" + emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + payloadChunkSize = 64 * 1024 + chunkSigConstLen = 17 // ";chunk-signature=" + signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2" + crlfLen = 2 // CRLF +) + +// Request headers to be ignored while calculating seed signature for +// a request. +var ignoredStreamingHeaders = map[string]bool{ + "Authorization": true, + "User-Agent": true, + "Content-Type": true, +} + +// getSignedChunkLength - calculates the length of chunk metadata +func getSignedChunkLength(chunkDataSize int64) int64 { + return int64(len(fmt.Sprintf("%x", chunkDataSize))) + + chunkSigConstLen + + signatureStrLen + + crlfLen + + chunkDataSize + + crlfLen +} + +// getStreamLength - calculates the length of the overall stream (data + metadata) +func getStreamLength(dataLen, chunkSize int64) int64 { + if dataLen <= 0 { + return 0 + } + + chunksCount := int64(dataLen / chunkSize) + remainingBytes := int64(dataLen % chunkSize) + streamLen := int64(0) + streamLen += chunksCount * getSignedChunkLength(chunkSize) + if remainingBytes > 0 { + streamLen += getSignedChunkLength(remainingBytes) + } + streamLen += getSignedChunkLength(0) + return streamLen +} + +// buildChunkStringToSign - returns the string to sign given chunk data +// and previous signature. +func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string { + stringToSignParts := []string{ + streamingPayloadHdr, + t.Format(iso8601DateFormat), + getScope(region, t), + previousSig, + emptySHA256, + hex.EncodeToString(sum256(chunkData)), + } + + return strings.Join(stringToSignParts, "\n") +} + +// prepareStreamingRequest - prepares a request with appropriate +// headers before computing the seed signature. +func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) { + // Set x-amz-content-sha256 header. + req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm) + if sessionToken != "" { + req.Header.Set("X-Amz-Security-Token", sessionToken) + } + + req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) + // Set content length with streaming signature for each chunk included. + req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize)) + req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10)) +} + +// buildChunkHeader - returns the chunk header. +// e.g string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n +func buildChunkHeader(chunkLen int64, signature string) []byte { + return []byte(strconv.FormatInt(chunkLen, 16) + ";chunk-signature=" + signature + "\r\n") +} + +// buildChunkSignature - returns chunk signature for a given chunk and previous signature. 
+func buildChunkSignature(chunkData []byte, reqTime time.Time, region,
+ previousSignature, secretAccessKey string) string {
+
+ chunkStringToSign := buildChunkStringToSign(reqTime, region,
+ previousSignature, chunkData)
+ signingKey := getSigningKey(secretAccessKey, region, reqTime)
+ return getSignature(signingKey, chunkStringToSign)
+}
+
+// setSeedSignature - computes and sets the seed signature for a given request.
+func (s *StreamingReader) setSeedSignature(req *http.Request) {
+ // Get canonical request
+ canonicalRequest := getCanonicalRequest(*req, ignoredStreamingHeaders)
+
+ // Get string to sign from canonical request.
+ stringToSign := getStringToSignV4(s.reqTime, s.region, canonicalRequest)
+
+ signingKey := getSigningKey(s.secretAccessKey, s.region, s.reqTime)
+
+ // Calculate signature.
+ s.seedSignature = getSignature(signingKey, stringToSign)
+}
+
+// StreamingReader implements chunked upload signature as a reader on
+// top of req.Body's ReadCloser, emitting chunk header;data;... for each chunk.
+type StreamingReader struct {
+ accessKeyID string
+ secretAccessKey string
+ sessionToken string
+ region string
+ prevSignature string
+ seedSignature string
+ contentLen int64 // Content-Length from req header
+ baseReadCloser io.ReadCloser // underlying io.Reader
+ bytesRead int64 // bytes read from underlying io.Reader
+ buf bytes.Buffer // holds signed chunk
+ chunkBuf []byte // holds raw data read from req Body
+ chunkBufLen int // no. of bytes read so far into chunkBuf
+ done bool // done reading the underlying reader to EOF
+ reqTime time.Time
+ chunkNum int
+ totalChunks int
+ lastChunkSize int
+}
+
+// signChunk - signs a chunk read from s.baseReader of chunkLen size.
+func (s *StreamingReader) signChunk(chunkLen int) {
+ // Compute chunk signature for next header
+ signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime,
+ s.region, s.prevSignature, s.secretAccessKey)
+
+ // For next chunk signature computation
+ s.prevSignature = signature
+
+ // Write chunk header into streaming buffer
+ chunkHdr := buildChunkHeader(int64(chunkLen), signature)
+ s.buf.Write(chunkHdr)
+
+ // Write chunk data into streaming buffer
+ s.buf.Write(s.chunkBuf[:chunkLen])
+
+ // Write the chunk trailer.
+ s.buf.Write([]byte("\r\n"))
+
+ // Reset chunkBufLen for next chunk read.
+ s.chunkBufLen = 0
+ s.chunkNum++
+}
+
+// setStreamingAuthHeader - builds and sets authorization header value
+// for streaming signature.
+func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) {
+ credential := GetCredential(s.accessKeyID, s.region, s.reqTime)
+ authParts := []string{
+ signV4Algorithm + " Credential=" + credential,
+ "SignedHeaders=" + getSignedHeaders(*req, ignoredStreamingHeaders),
+ "Signature=" + s.seedSignature,
+ }
+
+ // Set authorization header.
+ auth := strings.Join(authParts, ",")
+ req.Header.Set("Authorization", auth)
+}
+
+// StreamingSignV4 - provides chunked upload signatureV4 support by
+// implementing io.Reader.
+func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken,
+ region string, dataLen int64, reqTime time.Time) *http.Request {
+
+ // Set headers needed for streaming signature.
+ prepareStreamingRequest(req, sessionToken, dataLen, reqTime) + + if req.Body == nil { + req.Body = ioutil.NopCloser(bytes.NewReader([]byte(""))) + } + + stReader := &StreamingReader{ + baseReadCloser: req.Body, + accessKeyID: accessKeyID, + secretAccessKey: secretAccessKey, + sessionToken: sessionToken, + region: region, + reqTime: reqTime, + chunkBuf: make([]byte, payloadChunkSize), + contentLen: dataLen, + chunkNum: 1, + totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, + lastChunkSize: int(dataLen % payloadChunkSize), + } + + // Add the request headers required for chunk upload signing. + + // Compute the seed signature. + stReader.setSeedSignature(req) + + // Set the authorization header with the seed signature. + stReader.setStreamingAuthHeader(req) + + // Set seed signature as prevSignature for subsequent + // streaming signing process. + stReader.prevSignature = stReader.seedSignature + req.Body = stReader + + return req +} + +// Read - this method performs chunk upload signature providing a +// io.Reader interface. +func (s *StreamingReader) Read(buf []byte) (int, error) { + switch { + // After the last chunk is read from underlying reader, we + // never re-fill s.buf. + case s.done: + + // s.buf will be (re-)filled with next chunk when has lesser + // bytes than asked for. + case s.buf.Len() < len(buf): + s.chunkBufLen = 0 + for { + n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:]) + // Usually we validate `err` first, but in this case + // we are validating n > 0 for the following reasons. + // + // 1. n > 0, err is one of io.EOF, nil (near end of stream) + // A Reader returning a non-zero number of bytes at the end + // of the input stream may return either err == EOF or err == nil + // + // 2. n == 0, err is io.EOF (actual end of stream) + // + // Callers should always process the n > 0 bytes returned + // before considering the error err. + if n1 > 0 { + s.chunkBufLen += n1 + s.bytesRead += int64(n1) + + if s.chunkBufLen == payloadChunkSize || + (s.chunkNum == s.totalChunks-1 && + s.chunkBufLen == s.lastChunkSize) { + // Sign the chunk and write it to s.buf. + s.signChunk(s.chunkBufLen) + break + } + } + if err != nil { + if err == io.EOF { + // No more data left in baseReader - last chunk. + // Done reading the last chunk from baseReader. + s.done = true + + // bytes read from baseReader different than + // content length provided. + if s.bytesRead != s.contentLen { + return 0, io.ErrUnexpectedEOF + } + + // Sign the chunk and write it to s.buf. + s.signChunk(0) + break + } + return 0, err + } + + } + } + return s.buf.Read(buf) +} + +// Close - this method makes underlying io.ReadCloser's Close method available. +func (s *StreamingReader) Close() error { + return s.baseReadCloser.Close() +} diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go new file mode 100644 index 0000000..b407093 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go @@ -0,0 +1,316 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3signer
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/sha1"
+ "encoding/base64"
+ "fmt"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// Signature and API related constants.
+const (
+ signV2Algorithm = "AWS"
+)
+
+// Encode input URL path to URL encoded path.
+func encodeURL2Path(req *http.Request, virtualHost bool) (path string) {
+ if virtualHost {
+ reqHost := getHostAddr(req)
+ dotPos := strings.Index(reqHost, ".")
+ if dotPos > -1 {
+ bucketName := reqHost[:dotPos]
+ path = "/" + bucketName
+ path += req.URL.Path
+ path = s3utils.EncodePath(path)
+ return
+ }
+ }
+ path = s3utils.EncodePath(req.URL.Path)
+ return
+}
+
+// PreSignV2 - presign the request in the following style.
+// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}.
+func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64, virtualHost bool) *http.Request {
+ // Presign is not needed for anonymous credentials.
+ if accessKeyID == "" || secretAccessKey == "" {
+ return &req
+ }
+
+ d := time.Now().UTC()
+ // Find epoch expires when the request will expire.
+ epochExpires := d.Unix() + expires
+
+ // Add expires header if not present.
+ if expiresStr := req.Header.Get("Expires"); expiresStr == "" {
+ req.Header.Set("Expires", strconv.FormatInt(epochExpires, 10))
+ }
+
+ // Get presigned string to sign.
+ stringToSign := preStringToSignV2(req, virtualHost)
+ hm := hmac.New(sha1.New, []byte(secretAccessKey))
+ hm.Write([]byte(stringToSign))
+
+ // Calculate signature.
+ signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
+
+ query := req.URL.Query()
+ // Handle specially for Google Cloud Storage.
+ if strings.Contains(getHostAddr(&req), ".storage.googleapis.com") {
+ query.Set("GoogleAccessId", accessKeyID)
+ } else {
+ query.Set("AWSAccessKeyId", accessKeyID)
+ }
+
+ // Fill in Expires for presigned query.
+ query.Set("Expires", strconv.FormatInt(epochExpires, 10))
+
+ // Encode query and save.
+ req.URL.RawQuery = s3utils.QueryEncode(query)
+
+ // Save signature finally.
+ req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature)
+
+ // Return.
+ return &req
+}
+
+// PostPresignSignatureV2 - presigned signature for PostPolicy
+// request.
+func PostPresignSignatureV2(policyBase64, secretAccessKey string) string {
+ hm := hmac.New(sha1.New, []byte(secretAccessKey))
+ hm.Write([]byte(policyBase64))
+ signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
+ return signature
+}
+
+// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature;
+// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) );
+//
+// StringToSign = HTTP-Verb + "\n" +
+// Content-Md5 + "\n" +
+// Content-Type + "\n" +
+// Date + "\n" +
+// CanonicalizedProtocolHeaders +
+// CanonicalizedResource;
+//
+// CanonicalizedResource = [ "/" + Bucket ] +
+// <HTTP-Request-URI, from the protocol name up to the query string> +
+// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
For example "?acl", "?location", "?logging", or "?torrent"]; +// +// CanonicalizedProtocolHeaders = + +// SignV2 sign the request before Do() (AWS Signature Version 2). +func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost bool) *http.Request { + // Signature calculation is not needed for anonymous credentials. + if accessKeyID == "" || secretAccessKey == "" { + return &req + } + + // Initial time. + d := time.Now().UTC() + + // Add date if not present. + if date := req.Header.Get("Date"); date == "" { + req.Header.Set("Date", d.Format(http.TimeFormat)) + } + + // Calculate HMAC for secretAccessKey. + stringToSign := stringToSignV2(req, virtualHost) + hm := hmac.New(sha1.New, []byte(secretAccessKey)) + hm.Write([]byte(stringToSign)) + + // Prepare auth header. + authHeader := new(bytes.Buffer) + authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID)) + encoder := base64.NewEncoder(base64.StdEncoding, authHeader) + encoder.Write(hm.Sum(nil)) + encoder.Close() + + // Set Authorization header. + req.Header.Set("Authorization", authHeader.String()) + + return &req +} + +// From the Amazon docs: +// +// StringToSign = HTTP-Verb + "\n" + +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Expires + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; +func preStringToSignV2(req http.Request, virtualHost bool) string { + buf := new(bytes.Buffer) + // Write standard headers. + writePreSignV2Headers(buf, req) + // Write canonicalized protocol headers if any. + writeCanonicalizedHeaders(buf, req) + // Write canonicalized Query resources if any. + writeCanonicalizedResource(buf, req, virtualHost) + return buf.String() +} + +// writePreSignV2Headers - write preSign v2 required headers. +func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) { + buf.WriteString(req.Method + "\n") + buf.WriteString(req.Header.Get("Content-Md5") + "\n") + buf.WriteString(req.Header.Get("Content-Type") + "\n") + buf.WriteString(req.Header.Get("Expires") + "\n") +} + +// From the Amazon docs: +// +// StringToSign = HTTP-Verb + "\n" + +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Date + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; +func stringToSignV2(req http.Request, virtualHost bool) string { + buf := new(bytes.Buffer) + // Write standard headers. + writeSignV2Headers(buf, req) + // Write canonicalized protocol headers if any. + writeCanonicalizedHeaders(buf, req) + // Write canonicalized Query resources if any. + writeCanonicalizedResource(buf, req, virtualHost) + return buf.String() +} + +// writeSignV2Headers - write signV2 required headers. +func writeSignV2Headers(buf *bytes.Buffer, req http.Request) { + buf.WriteString(req.Method + "\n") + buf.WriteString(req.Header.Get("Content-Md5") + "\n") + buf.WriteString(req.Header.Get("Content-Type") + "\n") + buf.WriteString(req.Header.Get("Date") + "\n") +} + +// writeCanonicalizedHeaders - write canonicalized headers. 
+func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
+ var protoHeaders []string
+ vals := make(map[string][]string)
+ for k, vv := range req.Header {
+ // All the AMZ headers should be lowercase
+ lk := strings.ToLower(k)
+ if strings.HasPrefix(lk, "x-amz") {
+ protoHeaders = append(protoHeaders, lk)
+ vals[lk] = vv
+ }
+ }
+ sort.Strings(protoHeaders)
+ for _, k := range protoHeaders {
+ buf.WriteString(k)
+ buf.WriteByte(':')
+ for idx, v := range vals[k] {
+ if idx > 0 {
+ buf.WriteByte(',')
+ }
+ if strings.Contains(v, "\n") {
+ // TODO: "Unfold" long headers that
+ // span multiple lines (as allowed by
+ // RFC 2616, section 4.2) by replacing
+ // the folding white-space (including
+ // new-line) by a single space.
+ buf.WriteString(v)
+ } else {
+ buf.WriteString(v)
+ }
+ }
+ buf.WriteByte('\n')
+ }
+}
+
+// AWS S3 Signature V2 calculation rule is given here:
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign
+
+// Whitelist resource list that will be used in query string for signature-V2 calculation.
+// The list should be alphabetically sorted
+var resourceList = []string{
+ "acl",
+ "delete",
+ "lifecycle",
+ "location",
+ "logging",
+ "notification",
+ "partNumber",
+ "policy",
+ "requestPayment",
+ "response-cache-control",
+ "response-content-disposition",
+ "response-content-encoding",
+ "response-content-language",
+ "response-content-type",
+ "response-expires",
+ "torrent",
+ "uploadId",
+ "uploads",
+ "versionId",
+ "versioning",
+ "versions",
+ "website",
+}
+
+// From the Amazon docs:
+//
+// CanonicalizedResource = [ "/" + Bucket ] +
+// <HTTP-Request-URI, from the protocol name up to the query string> +
+// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
+func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, virtualHost bool) {
+ // Save request URL.
+ requestURL := req.URL
+ // Get encoded URL path.
+ buf.WriteString(encodeURL2Path(&req, virtualHost))
+ if requestURL.RawQuery != "" {
+ var n int
+ vals, _ := url.ParseQuery(requestURL.RawQuery)
+ // Verify if any sub resource queries are present, if yes
+ // canonicalize them.
+ for _, resource := range resourceList {
+ if vv, ok := vals[resource]; ok && len(vv) > 0 {
+ n++
+ // First element
+ switch n {
+ case 1:
+ buf.WriteByte('?')
+ // The rest
+ default:
+ buf.WriteByte('&')
+ }
+ buf.WriteString(resource)
+ // Request parameters
+ if len(vv[0]) > 0 {
+ buf.WriteByte('=')
+ buf.WriteString(vv[0])
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go
new file mode 100644
index 0000000..daf02fe
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go
@@ -0,0 +1,315 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package s3signer + +import ( + "bytes" + "encoding/hex" + "net/http" + "sort" + "strconv" + "strings" + "time" + + "github.com/minio/minio-go/pkg/s3utils" +) + +// Signature and API related constants. +const ( + signV4Algorithm = "AWS4-HMAC-SHA256" + iso8601DateFormat = "20060102T150405Z" + yyyymmdd = "20060102" +) + +/// +/// Excerpts from @lsegal - +/// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258. +/// +/// User-Agent: +/// +/// This is ignored from signing because signing this causes +/// problems with generating pre-signed URLs (that are executed +/// by other agents) or when customers pass requests through +/// proxies, which may modify the user-agent. +/// +/// Content-Length: +/// +/// This is ignored from signing because generating a pre-signed +/// URL should not provide a content-length constraint, +/// specifically when vending a S3 pre-signed PUT URL. The +/// corollary to this is that when sending regular requests +/// (non-pre-signed), the signature contains a checksum of the +/// body, which implicitly validates the payload length (since +/// changing the number of bytes would change the checksum) +/// and therefore this header is not valuable in the signature. +/// +/// Content-Type: +/// +/// Signing this header causes quite a number of problems in +/// browser environments, where browsers like to modify and +/// normalize the content-type header in different ways. There is +/// more information on this in https://goo.gl/2E9gyy. Avoiding +/// this field simplifies logic and reduces the possibility of +/// future bugs. +/// +/// Authorization: +/// +/// Is skipped for obvious reasons +/// +var v4IgnoredHeaders = map[string]bool{ + "Authorization": true, + "Content-Type": true, + "Content-Length": true, + "User-Agent": true, +} + +// getSigningKey hmac seed to calculate final signature. +func getSigningKey(secret, loc string, t time.Time) []byte { + date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd))) + location := sumHMAC(date, []byte(loc)) + service := sumHMAC(location, []byte("s3")) + signingKey := sumHMAC(service, []byte("aws4_request")) + return signingKey +} + +// getSignature final signature in hexadecimal form. +func getSignature(signingKey []byte, stringToSign string) string { + return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign))) +} + +// getScope generate a string of a specific date, an AWS region, and a +// service. +func getScope(location string, t time.Time) string { + scope := strings.Join([]string{ + t.Format(yyyymmdd), + location, + "s3", + "aws4_request", + }, "/") + return scope +} + +// GetCredential generate a credential string. +func GetCredential(accessKeyID, location string, t time.Time) string { + scope := getScope(location, t) + return accessKeyID + "/" + scope +} + +// getHashedPayload get the hexadecimal value of the SHA256 hash of +// the request payload. +func getHashedPayload(req http.Request) string { + hashedPayload := req.Header.Get("X-Amz-Content-Sha256") + if hashedPayload == "" { + // Presign does not have a payload, use S3 recommended value. + hashedPayload = unsignedPayload + } + return hashedPayload +} + +// getCanonicalHeaders generate a list of request headers for +// signature. 
+func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) string {
+ var headers []string
+ vals := make(map[string][]string)
+ for k, vv := range req.Header {
+ if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
+ continue // ignored header
+ }
+ headers = append(headers, strings.ToLower(k))
+ vals[strings.ToLower(k)] = vv
+ }
+ headers = append(headers, "host")
+ sort.Strings(headers)
+
+ var buf bytes.Buffer
+ // Save all the headers in canonical form <header>:<value> newline
+ // separated for each header.
+ for _, k := range headers {
+ buf.WriteString(k)
+ buf.WriteByte(':')
+ switch {
+ case k == "host":
+ buf.WriteString(getHostAddr(&req))
+ fallthrough
+ default:
+ for idx, v := range vals[k] {
+ if idx > 0 {
+ buf.WriteByte(',')
+ }
+ buf.WriteString(v)
+ }
+ buf.WriteByte('\n')
+ }
+ }
+ return buf.String()
+}
+
+// getSignedHeaders generate all signed request headers.
+// i.e. lexically sorted, semicolon-separated list of lowercase
+// request header names.
+func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string {
+ var headers []string
+ for k := range req.Header {
+ if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
+ continue // Ignored header found continue.
+ }
+ headers = append(headers, strings.ToLower(k))
+ }
+ headers = append(headers, "host")
+ sort.Strings(headers)
+ return strings.Join(headers, ";")
+}
+
+// getCanonicalRequest generate a canonical request of style.
+//
+// canonicalRequest =
+// <HTTPMethod>\n
+// <CanonicalURI>\n
+// <CanonicalQueryString>\n
+// <CanonicalHeaders>\n
+// <SignedHeaders>\n
+// <HashedPayload>
+func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool) string {
+ req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
+ canonicalRequest := strings.Join([]string{
+ req.Method,
+ s3utils.EncodePath(req.URL.Path),
+ req.URL.RawQuery,
+ getCanonicalHeaders(req, ignoredHeaders),
+ getSignedHeaders(req, ignoredHeaders),
+ getHashedPayload(req),
+ }, "\n")
+ return canonicalRequest
+}
+
+// getStringToSignV4 a string based on selected query values.
+func getStringToSignV4(t time.Time, location, canonicalRequest string) string {
+ stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n"
+ stringToSign = stringToSign + getScope(location, t) + "\n"
+ stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest)))
+ return stringToSign
+}
+
+// PreSignV4 presign the request, in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
+func PreSignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, expires int64) *http.Request {
+ // Presign is not needed for anonymous credentials.
+ if accessKeyID == "" || secretAccessKey == "" {
+ return &req
+ }
+
+ // Initial time.
+ t := time.Now().UTC()
+
+ // Get credential string.
+ credential := GetCredential(accessKeyID, location, t)
+
+ // Get all signed headers.
+ signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)
+
+ // Set URL query.
+ query := req.URL.Query()
+ query.Set("X-Amz-Algorithm", signV4Algorithm)
+ query.Set("X-Amz-Date", t.Format(iso8601DateFormat))
+ query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
+ query.Set("X-Amz-SignedHeaders", signedHeaders)
+ query.Set("X-Amz-Credential", credential)
+ // Set session token if available.
+ if sessionToken != "" {
+ query.Set("X-Amz-Security-Token", sessionToken)
+ }
+ req.URL.RawQuery = query.Encode()
+
+ // Get canonical request.
+ canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders)
+
+ // Get string to sign from canonical request.
+ stringToSign := getStringToSignV4(t, location, canonicalRequest)
+
+ // Get hmac signing key.
+ signingKey := getSigningKey(secretAccessKey, location, t)
+
+ // Calculate signature.
+ signature := getSignature(signingKey, stringToSign)
+
+ // Add signature header to RawQuery.
+ req.URL.RawQuery += "&X-Amz-Signature=" + signature
+
+ return &req
+}
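And the corresponding V4 presign sketch (keys, bucket, and expiry are placeholders; the query string of the returned request carries the signature):

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/minio/minio-go/pkg/s3signer"
)

func main() {
	req, err := http.NewRequest("GET", "https://s3.amazonaws.com/mybucket/myobject", nil)
	if err != nil {
		log.Fatalln(err)
	}

	// Presign for 10 minutes (600 seconds) in us-east-1, without a session token.
	presigned := s3signer.PreSignV4(*req, "MY-ACCESS-KEY", "MY-SECRET-KEY", "", "us-east-1", 600)
	fmt.Println(presigned.URL.String()) // now carries X-Amz-Algorithm, X-Amz-Signature, etc.
}

+// PostPresignSignatureV4 - presigned signature for PostPolicy
+// requests.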
+func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
+ // Get signing key.
+ signingkey := getSigningKey(secretAccessKey, location, t)
+ // Calculate signature.
+ signature := getSignature(signingkey, policyBase64)
+ return signature
+}
+
+// SignV4 sign the request before Do(), in accordance with
+// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
+func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request {
+ // Signature calculation is not needed for anonymous credentials.
+ if accessKeyID == "" || secretAccessKey == "" {
+ return &req
+ }
+
+ // Initial time.
+ t := time.Now().UTC()
+
+ // Set x-amz-date.
+ req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat))
+
+ // Set session token if available.
+ if sessionToken != "" {
+ req.Header.Set("X-Amz-Security-Token", sessionToken)
+ }
+
+ // Get canonical request.
+ canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders)
+
+ // Get string to sign from canonical request.
+ stringToSign := getStringToSignV4(t, location, canonicalRequest)
+
+ // Get hmac signing key.
+ signingKey := getSigningKey(secretAccessKey, location, t)
+
+ // Get credential string.
+ credential := GetCredential(accessKeyID, location, t)
+
+ // Get all signed headers.
+ signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)
+
+ // Calculate signature.
+ signature := getSignature(signingKey, stringToSign)
+
+ // If regular request, construct the final authorization header.
+ parts := []string{
+ signV4Algorithm + " Credential=" + credential,
+ "SignedHeaders=" + signedHeaders,
+ "Signature=" + signature,
+ }
+
+ // Set authorization header.
+ auth := strings.Join(parts, ", ")
+ req.Header.Set("Authorization", auth)
+
+ return &req
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go b/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go
new file mode 100644
index 0000000..33b1752
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go
@@ -0,0 +1,49 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3signer
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+ "net/http"
+)
+
+// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when the
+// request payload is not signed.
+const unsignedPayload = "UNSIGNED-PAYLOAD"
+
+// sum256 calculate sha256 sum for an input byte array.
+func sum256(data []byte) []byte {
+ hash := sha256.New()
+ hash.Write(data)
+ return hash.Sum(nil)
+}
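The getSigningKey chain used throughout this file is the standard AWS4 key derivation. A self-contained sketch of the same chain, with placeholder secret and region:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"time"
)

// hmacSHA256 mirrors sumHMAC below: HMAC-SHA256 of data under key.
func hmacSHA256(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}

func main() {
	secret, region := "MY-SECRET-KEY", "us-east-1"
	t := time.Now().UTC()

	// AWS4 derivation: date -> region -> service -> "aws4_request".
	date := hmacSHA256([]byte("AWS4"+secret), []byte(t.Format("20060102")))
	loc := hmacSHA256(date, []byte(region))
	svc := hmacSHA256(loc, []byte("s3"))
	signingKey := hmacSHA256(svc, []byte("aws4_request"))

	fmt.Println(hex.EncodeToString(signingKey))
}

+// sumHMAC calculate hmac between two input byte arrays.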
+func sumHMAC(key []byte, data []byte) []byte {
+ hash := hmac.New(sha256.New, key)
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+// getHostAddr returns host header if available, otherwise returns host from URL
+func getHostAddr(req *http.Request) string {
+ if req.Host != "" {
+ return req.Host
+ }
+ return req.URL.Host
+}
diff --git a/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go
new file mode 100644
index 0000000..adceb7f
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go
@@ -0,0 +1,331 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3utils
+
+import (
+ "bytes"
+ "encoding/hex"
+ "errors"
+ "net"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "unicode/utf8"
+)
+
+// Sentinel URL is the default url value which is invalid.
+var sentinelURL = url.URL{}
+
+// IsValidDomain validates if input string is a valid domain name.
+func IsValidDomain(host string) bool {
+ // See RFC 1035, RFC 3696.
+ host = strings.TrimSpace(host)
+ if len(host) == 0 || len(host) > 255 {
+ return false
+ }
+ // host cannot start or end with "-"
+ if host[len(host)-1:] == "-" || host[:1] == "-" {
+ return false
+ }
+ // host cannot start or end with "_"
+ if host[len(host)-1:] == "_" || host[:1] == "_" {
+ return false
+ }
+ // host cannot start or end with a "."
+ if host[len(host)-1:] == "." || host[:1] == "." {
+ return false
+ }
+ // All non alphanumeric characters are invalid.
+ if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:><?/") {
+ return false
+ }
+ return true
+}
+
+// IsValidIP parses input string for ip address validity.
+func IsValidIP(ip string) bool {
+ return net.ParseIP(ip) != nil
+}
+
+// IsVirtualHostSupported - verifies if bucketName can be part of
+// virtual host. Currently only Amazon S3 and Google Cloud Storage
+// would support this.
+func IsVirtualHostSupported(endpointURL url.URL, bucketName string) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ // bucketName can be valid but '.' in the hostname will fail SSL
+ // certificate validation. So do not use host-style for such buckets.
+ if endpointURL.Scheme == "https" && strings.Contains(bucketName, ".") {
+ return false
+ }
+ // Return true for all other cases
+ return IsAmazonEndpoint(endpointURL) || IsGoogleEndpoint(endpointURL)
+}
+
+// amazonS3HostHyphen - regex to determine if an arg is s3 host in hyphenated style.
+var amazonS3HostHyphen = regexp.MustCompile(`^s3-(.*?)\.amazonaws\.com$`)
+
+// amazonS3HostDualStack - regex to determine if an arg is s3 host dualstack.
+var amazonS3HostDualStack = regexp.MustCompile(`^s3\.dualstack\.(.*?)\.amazonaws\.com$`)
+
+// amazonS3HostDot - regex to determine if an arg is s3 host in . style.
+var amazonS3HostDot = regexp.MustCompile(`^s3\.(.*?)\.amazonaws\.com$`)
+
+// amazonS3ChinaHost - regex to determine if the arg is s3 china host.
+var amazonS3ChinaHost = regexp.MustCompile(`^(s3\.cn.*?)\.amazonaws\.com\.cn$`)
+
+// GetRegionFromURL - returns a region from url host.
+func GetRegionFromURL(endpointURL url.URL) string {
+ if endpointURL == sentinelURL {
+ return ""
+ }
+ if endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" {
+ return "us-gov-west-1"
+ }
+ parts := amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Host)
+ if len(parts) > 1 {
+ return parts[1]
+ }
+ return ""
+}
+
+// IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint.
+func IsAmazonEndpoint(endpointURL url.URL) bool {
+ if endpointURL.Host == "s3-external-1.amazonaws.com" || endpointURL.Host == "s3.amazonaws.com" {
+ return true
+ }
+ return GetRegionFromURL(endpointURL) != ""
+}
+
+// IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint.
+func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool {
+ if endpointURL == sentinelURL {
+ return false
+ }
+ return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" ||
+ IsAmazonFIPSGovCloudEndpoint(endpointURL))
+}
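A tiny sketch of the exported helpers in this file, using a made-up dualstack endpoint:

package main

import (
	"fmt"
	"net/url"

	"github.com/minio/minio-go/pkg/s3utils"
)

func main() {
	u, err := url.Parse("https://s3.dualstack.eu-west-1.amazonaws.com")
	if err != nil {
		panic(err)
	}

	fmt.Println(s3utils.GetRegionFromURL(*u))  // eu-west-1
	fmt.Println(s3utils.IsAmazonEndpoint(*u))  // true
	fmt.Println(s3utils.IsValidDomain(u.Host)) // true
}

+// IsAmazonFIPSGovCloudEndpoint - Match if it is exactly Amazon S3 FIPS GovCloud endpoint.
+// See https://aws.amazon.com/compliance/fips.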
+func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool { + if endpointURL == sentinelURL { + return false + } + return endpointURL.Host == "s3-fips-us-gov-west-1.amazonaws.com" || + endpointURL.Host == "s3-fips.dualstack.us-gov-west-1.amazonaws.com" +} + +// IsAmazonFIPSUSEastWestEndpoint - Match if it is exactly Amazon S3 FIPS US East/West endpoint. +// See https://aws.amazon.com/compliance/fips. +func IsAmazonFIPSUSEastWestEndpoint(endpointURL url.URL) bool { + if endpointURL == sentinelURL { + return false + } + switch endpointURL.Host { + case "s3-fips.us-east-2.amazonaws.com": + case "s3-fips.dualstack.us-west-1.amazonaws.com": + case "s3-fips.dualstack.us-west-2.amazonaws.com": + case "s3-fips.dualstack.us-east-2.amazonaws.com": + case "s3-fips.dualstack.us-east-1.amazonaws.com": + case "s3-fips.us-west-1.amazonaws.com": + case "s3-fips.us-west-2.amazonaws.com": + case "s3-fips.us-east-1.amazonaws.com": + default: + return false + } + return true +} + +// IsAmazonFIPSEndpoint - Match if it is exactly Amazon S3 FIPS endpoint. +// See https://aws.amazon.com/compliance/fips. +func IsAmazonFIPSEndpoint(endpointURL url.URL) bool { + return IsAmazonFIPSUSEastWestEndpoint(endpointURL) || IsAmazonFIPSGovCloudEndpoint(endpointURL) +} + +// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint. +func IsGoogleEndpoint(endpointURL url.URL) bool { + if endpointURL == sentinelURL { + return false + } + return endpointURL.Host == "storage.googleapis.com" +} + +// Expects ascii encoded strings - from output of urlEncodePath +func percentEncodeSlash(s string) string { + return strings.Replace(s, "/", "%2F", -1) +} + +// QueryEncode - encodes query values in their URL encoded form. In +// addition to the percent encoding performed by urlEncodePath() used +// here, it also percent encodes '/' (forward slash) +func QueryEncode(v url.Values) string { + if v == nil { + return "" + } + var buf bytes.Buffer + keys := make([]string, 0, len(v)) + for k := range v { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + vs := v[k] + prefix := percentEncodeSlash(EncodePath(k)) + "=" + for _, v := range vs { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(prefix) + buf.WriteString(percentEncodeSlash(EncodePath(v))) + } + } + return buf.String() +} + +// if object matches reserved string, no need to encode them +var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") + +// EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences +// +// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8 +// non english characters cannot be parsed due to the nature in which url.Encode() is written +// +// This function on the other hand is a direct replacement for url.Encode() technique to support +// pretty much every UTF-8 character. 
+func EncodePath(pathName string) string { + if reservedObjectNames.MatchString(pathName) { + return pathName + } + var encodedPathname string + for _, s := range pathName { + if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + } + switch s { + case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + default: + len := utf8.RuneLen(s) + if len < 0 { + // if utf8 cannot convert return the same string as is + return pathName + } + u := make([]byte, len) + utf8.EncodeRune(u, s) + for _, r := range u { + hex := hex.EncodeToString([]byte{r}) + encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) + } + } + } + return encodedPathname +} + +// We support '.' with bucket names but we fallback to using path +// style requests instead for such buckets. +var ( + validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$`) + validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) + ipAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) +) + +// Common checker for both stricter and basic validation. +func checkBucketNameCommon(bucketName string, strict bool) (err error) { + if strings.TrimSpace(bucketName) == "" { + return errors.New("Bucket name cannot be empty") + } + if len(bucketName) < 3 { + return errors.New("Bucket name cannot be smaller than 3 characters") + } + if len(bucketName) > 63 { + return errors.New("Bucket name cannot be greater than 63 characters") + } + if ipAddress.MatchString(bucketName) { + return errors.New("Bucket name cannot be an ip address") + } + if strings.Contains(bucketName, "..") { + return errors.New("Bucket name contains invalid characters") + } + if strict { + if !validBucketNameStrict.MatchString(bucketName) { + err = errors.New("Bucket name contains invalid characters") + } + return err + } + if !validBucketName.MatchString(bucketName) { + err = errors.New("Bucket name contains invalid characters") + } + return err +} + +// CheckValidBucketName - checks if we have a valid input bucket name. +func CheckValidBucketName(bucketName string) (err error) { + return checkBucketNameCommon(bucketName, false) +} + +// CheckValidBucketNameStrict - checks if we have a valid input bucket name. +// This is a stricter version. +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html +func CheckValidBucketNameStrict(bucketName string) (err error) { + return checkBucketNameCommon(bucketName, true) +} + +// CheckValidObjectNamePrefix - checks if we have a valid input object name prefix. +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html +func CheckValidObjectNamePrefix(objectName string) error { + if len(objectName) > 1024 { + return errors.New("Object name cannot be greater than 1024 characters") + } + if !utf8.ValidString(objectName) { + return errors.New("Object name with non UTF-8 strings are not supported") + } + return nil +} + +// CheckValidObjectName - checks if we have a valid input object name. 
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html +func CheckValidObjectName(objectName string) error { + if strings.TrimSpace(objectName) == "" { + return errors.New("Object name cannot be empty") + } + return CheckValidObjectNamePrefix(objectName) +} diff --git a/vendor/github.com/minio/minio-go/pkg/set/stringset.go b/vendor/github.com/minio/minio-go/pkg/set/stringset.go new file mode 100644 index 0000000..efd0262 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/set/stringset.go @@ -0,0 +1,197 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package set + +import ( + "encoding/json" + "fmt" + "sort" +) + +// StringSet - uses map as set of strings. +type StringSet map[string]struct{} + +// ToSlice - returns StringSet as string slice. +func (set StringSet) ToSlice() []string { + keys := make([]string, 0, len(set)) + for k := range set { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// IsEmpty - returns whether the set is empty or not. +func (set StringSet) IsEmpty() bool { + return len(set) == 0 +} + +// Add - adds string to the set. +func (set StringSet) Add(s string) { + set[s] = struct{}{} +} + +// Remove - removes string in the set. It does nothing if string does not exist in the set. +func (set StringSet) Remove(s string) { + delete(set, s) +} + +// Contains - checks if string is in the set. +func (set StringSet) Contains(s string) bool { + _, ok := set[s] + return ok +} + +// FuncMatch - returns new set containing each value who passes match function. +// A 'matchFn' should accept element in a set as first argument and +// 'matchString' as second argument. The function can do any logic to +// compare both the arguments and should return true to accept element in +// a set to include in output set else the element is ignored. +func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString string) StringSet { + nset := NewStringSet() + for k := range set { + if matchFn(k, matchString) { + nset.Add(k) + } + } + return nset +} + +// ApplyFunc - returns new set containing each value processed by 'applyFn'. +// A 'applyFn' should accept element in a set as a argument and return +// a processed string. The function can do any logic to return a processed +// string. +func (set StringSet) ApplyFunc(applyFn func(string) string) StringSet { + nset := NewStringSet() + for k := range set { + nset.Add(applyFn(k)) + } + return nset +} + +// Equals - checks whether given set is equal to current set or not. +func (set StringSet) Equals(sset StringSet) bool { + // If length of set is not equal to length of given set, the + // set is not equal to given set. + if len(set) != len(sset) { + return false + } + + // As both sets are equal in length, check each elements are equal. 
+ for k := range set { + if _, ok := sset[k]; !ok { + return false + } + } + + return true +} + +// Intersection - returns the intersection with given set as new set. +func (set StringSet) Intersection(sset StringSet) StringSet { + nset := NewStringSet() + for k := range set { + if _, ok := sset[k]; ok { + nset.Add(k) + } + } + + return nset +} + +// Difference - returns the difference with given set as new set. +func (set StringSet) Difference(sset StringSet) StringSet { + nset := NewStringSet() + for k := range set { + if _, ok := sset[k]; !ok { + nset.Add(k) + } + } + + return nset +} + +// Union - returns the union with given set as new set. +func (set StringSet) Union(sset StringSet) StringSet { + nset := NewStringSet() + for k := range set { + nset.Add(k) + } + + for k := range sset { + nset.Add(k) + } + + return nset +} + +// MarshalJSON - converts to JSON data. +func (set StringSet) MarshalJSON() ([]byte, error) { + return json.Marshal(set.ToSlice()) +} + +// UnmarshalJSON - parses JSON data and creates new set with it. +// If 'data' contains JSON string array, the set contains each string. +// If 'data' contains JSON string, the set contains the string as one element. +// If 'data' contains Other JSON types, JSON parse error is returned. +func (set *StringSet) UnmarshalJSON(data []byte) error { + sl := []string{} + var err error + if err = json.Unmarshal(data, &sl); err == nil { + *set = make(StringSet) + for _, s := range sl { + set.Add(s) + } + } else { + var s string + if err = json.Unmarshal(data, &s); err == nil { + *set = make(StringSet) + set.Add(s) + } + } + + return err +} + +// String - returns printable string of the set. +func (set StringSet) String() string { + return fmt.Sprintf("%s", set.ToSlice()) +} + +// NewStringSet - creates new string set. +func NewStringSet() StringSet { + return make(StringSet) +} + +// CreateStringSet - creates new string set with given string values. +func CreateStringSet(sl ...string) StringSet { + set := make(StringSet) + for _, k := range sl { + set.Add(k) + } + return set +} + +// CopyStringSet - returns copy of given set. +func CopyStringSet(set StringSet) StringSet { + nset := NewStringSet() + for k, v := range set { + nset[k] = v + } + return nset +} diff --git a/vendor/github.com/minio/minio-go/post-policy.go b/vendor/github.com/minio/minio-go/post-policy.go new file mode 100644 index 0000000..c285fde --- /dev/null +++ b/vendor/github.com/minio/minio-go/post-policy.go @@ -0,0 +1,270 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "encoding/base64" + "fmt" + "strings" + "time" +) + +// expirationDateFormat date format for expiration key in json policy. 
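A short sketch of the set operations defined above (hypothetical usage; the vendored import path is assumed):

    package main

    import (
        "fmt"

        "github.com/minio/minio-go/pkg/set"
    )

    func main() {
        a := set.CreateStringSet("s3:GetObject", "s3:PutObject")
        b := set.CreateStringSet("s3:PutObject", "s3:DeleteObject")

        fmt.Println(a.Intersection(b))                        // [s3:PutObject]
        fmt.Println(a.Union(b).ToSlice())                     // sorted slice of all three actions
        fmt.Println(a.Difference(b).Contains("s3:GetObject")) // true

        // MarshalJSON emits the members as a sorted JSON array.
        buf, _ := a.MarshalJSON()
        fmt.Println(string(buf)) // ["s3:GetObject","s3:PutObject"]
    }

The post-policy implementation resumes with its expiration date format constant.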
+const expirationDateFormat = "2006-01-02T15:04:05.999Z" + +// policyCondition explanation: +// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html +// +// Example: +// +// policyCondition { +// matchType: "$eq", +// key: "$Content-Type", +// value: "image/png", +// } +// +type policyCondition struct { + matchType string + condition string + value string +} + +// PostPolicy - Provides strict static type conversion and validation +// for Amazon S3's POST policy JSON string. +type PostPolicy struct { + // Expiration date and time of the POST policy. + expiration time.Time + // Collection of different policy conditions. + conditions []policyCondition + // ContentLengthRange minimum and maximum allowable size for the + // uploaded content. + contentLengthRange struct { + min int64 + max int64 + } + + // Post form data. + formData map[string]string +} + +// NewPostPolicy - Instantiate new post policy. +func NewPostPolicy() *PostPolicy { + p := &PostPolicy{} + p.conditions = make([]policyCondition, 0) + p.formData = make(map[string]string) + return p +} + +// SetExpires - Sets expiration time for the new policy. +func (p *PostPolicy) SetExpires(t time.Time) error { + if t.IsZero() { + return ErrInvalidArgument("No expiry time set.") + } + p.expiration = t + return nil +} + +// SetKey - Sets an object name for the policy based upload. +func (p *PostPolicy) SetKey(key string) error { + if strings.TrimSpace(key) == "" || key == "" { + return ErrInvalidArgument("Object name is empty.") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$key", + value: key, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["key"] = key + return nil +} + +// SetKeyStartsWith - Sets an object name that an policy based upload +// can start with. +func (p *PostPolicy) SetKeyStartsWith(keyStartsWith string) error { + if strings.TrimSpace(keyStartsWith) == "" || keyStartsWith == "" { + return ErrInvalidArgument("Object prefix is empty.") + } + policyCond := policyCondition{ + matchType: "starts-with", + condition: "$key", + value: keyStartsWith, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["key"] = keyStartsWith + return nil +} + +// SetBucket - Sets bucket at which objects will be uploaded to. +func (p *PostPolicy) SetBucket(bucketName string) error { + if strings.TrimSpace(bucketName) == "" || bucketName == "" { + return ErrInvalidArgument("Bucket name is empty.") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$bucket", + value: bucketName, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["bucket"] = bucketName + return nil +} + +// SetContentType - Sets content-type of the object for this policy +// based upload. +func (p *PostPolicy) SetContentType(contentType string) error { + if strings.TrimSpace(contentType) == "" || contentType == "" { + return ErrInvalidArgument("No content type specified.") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$Content-Type", + value: contentType, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["Content-Type"] = contentType + return nil +} + +// SetContentLengthRange - Set new min and max content length +// condition for all incoming uploads. 
+func (p *PostPolicy) SetContentLengthRange(min, max int64) error { + if min > max { + return ErrInvalidArgument("Minimum limit is larger than maximum limit.") + } + if min < 0 { + return ErrInvalidArgument("Minimum limit cannot be negative.") + } + if max < 0 { + return ErrInvalidArgument("Maximum limit cannot be negative.") + } + p.contentLengthRange.min = min + p.contentLengthRange.max = max + return nil +} + +// SetSuccessStatusAction - Sets the status success code of the object for this policy +// based upload. +func (p *PostPolicy) SetSuccessStatusAction(status string) error { + if strings.TrimSpace(status) == "" || status == "" { + return ErrInvalidArgument("Status is empty") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$success_action_status", + value: status, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["success_action_status"] = status + return nil +} + +// SetUserMetadata - Set user metadata as a key/value couple. +// Can be retrieved through a HEAD request or an event. +func (p *PostPolicy) SetUserMetadata(key string, value string) error { + if strings.TrimSpace(key) == "" || key == "" { + return ErrInvalidArgument("Key is empty") + } + if strings.TrimSpace(value) == "" || value == "" { + return ErrInvalidArgument("Value is empty") + } + headerName := fmt.Sprintf("x-amz-meta-%s", key) + policyCond := policyCondition{ + matchType: "eq", + condition: fmt.Sprintf("$%s", headerName), + value: value, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData[headerName] = value + return nil +} + +// SetUserData - Set user data as a key/value couple. +// Can be retrieved through a HEAD request or an event. +func (p *PostPolicy) SetUserData(key string, value string) error { + if key == "" { + return ErrInvalidArgument("Key is empty") + } + if value == "" { + return ErrInvalidArgument("Value is empty") + } + headerName := fmt.Sprintf("x-amz-%s", key) + policyCond := policyCondition{ + matchType: "eq", + condition: fmt.Sprintf("$%s", headerName), + value: value, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData[headerName] = value + return nil +} + +// addNewPolicy - internal helper to validate adding new policies. +func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error { + if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" { + return ErrInvalidArgument("Policy fields are empty.") + } + p.conditions = append(p.conditions, policyCond) + return nil +} + +// Stringer interface for printing policy in json formatted string. +func (p PostPolicy) String() string { + return string(p.marshalJSON()) +} + +// marshalJSON - Provides Marshalled JSON in bytes. 
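Putting the setters together, a hypothetical sketch of assembling a browser-upload POST policy (the bucket, prefix, and limits are made up; error returns are elided for brevity):

    package main

    import (
        "fmt"
        "time"

        minio "github.com/minio/minio-go"
    )

    func main() {
        p := minio.NewPostPolicy()
        _ = p.SetBucket("my-bucket")
        _ = p.SetKeyStartsWith("uploads/")
        _ = p.SetContentType("image/png")
        _ = p.SetContentLengthRange(1024, 10*1024*1024) // 1 KiB .. 10 MiB
        _ = p.SetExpires(time.Now().UTC().Add(15 * time.Minute))

        // String() renders the JSON policy document that is then
        // base64-encoded and signed for the POST form.
        fmt.Println(p)
    }

marshalJSON, which performs that rendering, follows.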
+func (p PostPolicy) marshalJSON() []byte {
+    expirationStr := `"expiration":"` + p.expiration.Format(expirationDateFormat) + `"`
+    var conditionsStr string
+    conditions := []string{}
+    for _, po := range p.conditions {
+        conditions = append(conditions, fmt.Sprintf("[\"%s\",\"%s\",\"%s\"]", po.matchType, po.condition, po.value))
+    }
+    if p.contentLengthRange.min != 0 || p.contentLengthRange.max != 0 {
+        conditions = append(conditions, fmt.Sprintf("[\"content-length-range\", %d, %d]",
+            p.contentLengthRange.min, p.contentLengthRange.max))
+    }
+    if len(conditions) > 0 {
+        conditionsStr = `"conditions":[` + strings.Join(conditions, ",") + "]"
+    }
+    retStr := "{"
+    retStr = retStr + expirationStr + ","
+    retStr = retStr + conditionsStr
+    retStr = retStr + "}"
+    return []byte(retStr)
+}
+
+// base64 - Produces base64 of PostPolicy's Marshalled json.
+func (p PostPolicy) base64() string {
+    return base64.StdEncoding.EncodeToString(p.marshalJSON())
+}
diff --git a/vendor/github.com/minio/minio-go/retry-continous.go b/vendor/github.com/minio/minio-go/retry-continous.go
new file mode 100644
index 0000000..f31dfa6
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/retry-continous.go
@@ -0,0 +1,69 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import "time"
+
+// newRetryTimerContinous creates a timer with exponentially increasing delays forever.
+func (c Client) newRetryTimerContinous(unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
+    attemptCh := make(chan int)
+
+    // normalize jitter to the range [0, 1.0]
+    if jitter < NoJitter {
+        jitter = NoJitter
+    }
+    if jitter > MaxJitter {
+        jitter = MaxJitter
+    }
+
+    // computes the exponential backoff duration according to
+    // https://www.awsarchitectureblog.com/2015/03/backoff.html
+    exponentialBackoffWait := func(attempt int) time.Duration {
+        // 1<<uint(attempt) below could overflow, so limit the value of attempt
+        maxAttempt := 30
+        if attempt > maxAttempt {
+            attempt = maxAttempt
+        }
+        //sleep = random_between(0, min(cap, base * 2 ** attempt))
+        sleep := unit * time.Duration(1<<uint(attempt))
+        if sleep > cap {
+            sleep = cap
+        }
+        if jitter != NoJitter {
+            sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
+        }
+        return sleep
+    }
+
+    go func() {
+        defer close(attemptCh)
+        var nextBackoff int
+        for {
+            select {
+            // Attempts starts.
+            case attemptCh <- nextBackoff:
+                nextBackoff++
+            case <-doneCh:
+                // Stop the routine.
+                return
+            }
+            time.Sleep(exponentialBackoffWait(nextBackoff))
+        }
+    }()
+    return attemptCh
+}
diff --git a/vendor/github.com/minio/minio-go/retry.go b/vendor/github.com/minio/minio-go/retry.go
new file mode 100644
index 0000000..445167b
--- /dev/null
+++ b/vendor/github.com/minio/minio-go/retry.go
@@ -0,0 +1,153 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+    "net"
+    "net/http"
+    "net/url"
+    "strings"
+    "time"
+)
+
+// MaxRetry is the maximum number of retries before stopping.
+var MaxRetry = 10
+
+// MaxJitter will randomize over the full exponential backoff time
+const MaxJitter = 1.0
+
+// NoJitter disables the use of jitter for randomizing the exponential backoff time
+const NoJitter = 0.0
+
+// DefaultRetryUnit - default unit multiplicative per retry.
+// defaults to 1 second.
+const DefaultRetryUnit = time.Second
+
+// DefaultRetryCap - Each retry attempt never waits no longer than
+// this maximum time duration.
+const DefaultRetryCap = time.Second * 30
+
+// newRetryTimer creates a timer with exponentially increasing
+// delays until the maximum retry attempts are reached.
+func (c Client) newRetryTimer(maxRetry int, unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
+    attemptCh := make(chan int)
+
+    // computes the exponential backoff duration according to
+    // https://www.awsarchitectureblog.com/2015/03/backoff.html
+    exponentialBackoffWait := func(attempt int) time.Duration {
+        // normalize jitter to the range [0, 1.0]
+        if jitter < NoJitter {
+            jitter = NoJitter
+        }
+        if jitter > MaxJitter {
+            jitter = MaxJitter
+        }
+
+        //sleep = random_between(0, min(cap, base * 2 ** attempt))
+        sleep := unit * time.Duration(1<<uint(attempt))
+        if sleep > cap {
+            sleep = cap
+        }
+        if jitter != NoJitter {
+            sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
+        }
+        return sleep
+    }
+
+    go func() {
+        defer close(attemptCh)
+        for i := 0; i < maxRetry; i++ {
+            select {
+            // Attempts start from 1.
+            case attemptCh <- i + 1:
+            case <-doneCh:
+                // Stop the routine.
+                return
+            }
+            time.Sleep(exponentialBackoffWait(i))
+        }
+    }()
+    return attemptCh
+}
+
+// isHTTPReqErrorRetryable - is http requests error retryable, such
+// as i/o timeout, connection broken etc..
+func isHTTPReqErrorRetryable(err error) bool {
+    if err == nil {
+        return false
+    }
+    switch e := err.(type) {
+    case *url.Error:
+        switch e.Err.(type) {
+        case *net.DNSError, *net.OpError, net.UnknownNetworkError:
+            return true
+        }
+        if strings.Contains(err.Error(), "Connection closed by foreign host") {
+            return true
+        } else if strings.Contains(err.Error(), "net/http: TLS handshake timeout") {
+            // If error is - tlsHandshakeTimeoutError, retry.
+            return true
+        } else if strings.Contains(err.Error(), "i/o timeout") {
+            // If error is - tcp timeoutError, retry.
+            return true
+        } else if strings.Contains(err.Error(), "connection timed out") {
+            // If err is a net.Dial timeout, retry.
+            return true
+        } else if strings.Contains(err.Error(), "net/http: HTTP/1.x transport connection broken") {
+            // If error is transport connection broken, retry.
+            return true
+        }
+    }
+    return false
+}
+
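Since newRetryTimer is unexported, here is a standalone sketch of the same capped-exponential-backoff-with-jitter computation it uses (illustrative only; the unit and cap values mirror DefaultRetryUnit and DefaultRetryCap above):

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    // backoff mirrors exponentialBackoffWait: exponential growth capped
    // at a maximum, minus a random jitter fraction.
    func backoff(attempt int, unit, max time.Duration, r *rand.Rand) time.Duration {
        sleep := unit * time.Duration(1<<uint(attempt))
        if sleep > max {
            sleep = max
        }
        return sleep - time.Duration(r.Float64()*float64(sleep))
    }

    func main() {
        r := rand.New(rand.NewSource(1))
        for attempt := 1; attempt <= 5; attempt++ {
            fmt.Printf("attempt %d, next wait %v\n",
                attempt, backoff(attempt, time.Second, 30*time.Second, r))
        }
    }

+// List of AWS S3 error codes which are retryable.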
+var retryableS3Codes = map[string]struct{}{ + "RequestError": {}, + "RequestTimeout": {}, + "Throttling": {}, + "ThrottlingException": {}, + "RequestLimitExceeded": {}, + "RequestThrottled": {}, + "InternalError": {}, + "ExpiredToken": {}, + "ExpiredTokenException": {}, + "SlowDown": {}, + // Add more AWS S3 codes here. +} + +// isS3CodeRetryable - is s3 error code retryable. +func isS3CodeRetryable(s3Code string) (ok bool) { + _, ok = retryableS3Codes[s3Code] + return ok +} + +// List of HTTP status codes which are retryable. +var retryableHTTPStatusCodes = map[int]struct{}{ + 429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet + http.StatusInternalServerError: {}, + http.StatusBadGateway: {}, + http.StatusServiceUnavailable: {}, + // Add more HTTP status codes here. +} + +// isHTTPStatusRetryable - is HTTP error code retryable. +func isHTTPStatusRetryable(httpStatusCode int) (ok bool) { + _, ok = retryableHTTPStatusCodes[httpStatusCode] + return ok +} diff --git a/vendor/github.com/minio/minio-go/s3-endpoints.go b/vendor/github.com/minio/minio-go/s3-endpoints.go new file mode 100644 index 0000000..0eccd24 --- /dev/null +++ b/vendor/github.com/minio/minio-go/s3-endpoints.go @@ -0,0 +1,52 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +// awsS3EndpointMap Amazon S3 endpoint map. +var awsS3EndpointMap = map[string]string{ + "us-east-1": "s3.dualstack.us-east-1.amazonaws.com", + "us-east-2": "s3.dualstack.us-east-2.amazonaws.com", + "us-west-2": "s3.dualstack.us-west-2.amazonaws.com", + "us-west-1": "s3.dualstack.us-west-1.amazonaws.com", + "ca-central-1": "s3.dualstack.ca-central-1.amazonaws.com", + "eu-west-1": "s3.dualstack.eu-west-1.amazonaws.com", + "eu-west-2": "s3.dualstack.eu-west-2.amazonaws.com", + "eu-west-3": "s3.dualstack.eu-west-3.amazonaws.com", + "eu-central-1": "s3.dualstack.eu-central-1.amazonaws.com", + "eu-north-1": "s3.dualstack.eu-north-1.amazonaws.com", + "ap-south-1": "s3.dualstack.ap-south-1.amazonaws.com", + "ap-southeast-1": "s3.dualstack.ap-southeast-1.amazonaws.com", + "ap-southeast-2": "s3.dualstack.ap-southeast-2.amazonaws.com", + "ap-northeast-1": "s3.dualstack.ap-northeast-1.amazonaws.com", + "ap-northeast-2": "s3.dualstack.ap-northeast-2.amazonaws.com", + "sa-east-1": "s3.dualstack.sa-east-1.amazonaws.com", + "us-gov-west-1": "s3.dualstack.us-gov-west-1.amazonaws.com", + "us-gov-east-1": "s3.dualstack.us-gov-east-1.amazonaws.com", + "cn-north-1": "s3.cn-north-1.amazonaws.com.cn", + "cn-northwest-1": "s3.cn-northwest-1.amazonaws.com.cn", +} + +// getS3Endpoint get Amazon S3 endpoint based on the bucket location. +func getS3Endpoint(bucketLocation string) (s3Endpoint string) { + s3Endpoint, ok := awsS3EndpointMap[bucketLocation] + if !ok { + // Default to 's3.dualstack.us-east-1.amazonaws.com' endpoint. 
+ s3Endpoint = "s3.dualstack.us-east-1.amazonaws.com" + } + return s3Endpoint +} diff --git a/vendor/github.com/minio/minio-go/s3-error.go b/vendor/github.com/minio/minio-go/s3-error.go new file mode 100644 index 0000000..3b11776 --- /dev/null +++ b/vendor/github.com/minio/minio-go/s3-error.go @@ -0,0 +1,61 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +// Non exhaustive list of AWS S3 standard error responses - +// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html +var s3ErrorResponseMap = map[string]string{ + "AccessDenied": "Access Denied.", + "BadDigest": "The Content-Md5 you specified did not match what we received.", + "EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.", + "EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.", + "IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.", + "InternalError": "We encountered an internal error, please try again.", + "InvalidAccessKeyId": "The access key ID you provided does not exist in our records.", + "InvalidBucketName": "The specified bucket is not valid.", + "InvalidDigest": "The Content-Md5 you specified is not valid.", + "InvalidRange": "The requested range is not satisfiable", + "MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.", + "MissingContentLength": "You must provide the Content-Length HTTP header.", + "MissingContentMD5": "Missing required header for this request: Content-Md5.", + "MissingRequestBodyError": "Request body is empty.", + "NoSuchBucket": "The specified bucket does not exist.", + "NoSuchBucketPolicy": "The bucket policy does not exist", + "NoSuchKey": "The specified key does not exist.", + "NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.", + "NotImplemented": "A header you provided implies functionality that is not implemented", + "PreconditionFailed": "At least one of the pre-conditions you specified did not hold", + "RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.", + "SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. Check your key and signing method.", + "MethodNotAllowed": "The specified method is not allowed against this resource.", + "InvalidPart": "One or more of the specified parts could not be found.", + "InvalidPartOrder": "The list of parts was not in ascending order. 
The parts list must be specified in order by part number.", + "InvalidObjectState": "The operation is not valid for the current state of the object.", + "AuthorizationHeaderMalformed": "The authorization header is malformed; the region is wrong.", + "MalformedPOSTRequest": "The body of your POST request is not well-formed multipart/form-data.", + "BucketNotEmpty": "The bucket you tried to delete is not empty", + "AllAccessDisabled": "All access to this bucket has been disabled.", + "MalformedPolicy": "Policy has invalid resource.", + "MissingFields": "Missing fields in request.", + "AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"/YYYYMMDD/REGION/SERVICE/aws4_request\".", + "MalformedDate": "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.", + "BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.", + "InvalidDuration": "Duration provided in the request is invalid.", + "XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.", + // Add new API errors here. +} diff --git a/vendor/github.com/minio/minio-go/transport.go b/vendor/github.com/minio/minio-go/transport.go new file mode 100644 index 0000000..88700cf --- /dev/null +++ b/vendor/github.com/minio/minio-go/transport.go @@ -0,0 +1,50 @@ +// +build go1.7 go1.8 + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2017-2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "net" + "net/http" + "time" +) + +// DefaultTransport - this default transport is similar to +// http.DefaultTransport but with additional param DisableCompression +// is set to true to avoid decompressing content with 'gzip' encoding. +var DefaultTransport http.RoundTripper = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + MaxIdleConnsPerHost: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + // Set this value so that the underlying transport round-tripper + // doesn't try to auto decode the body of objects with + // content-encoding set to `gzip`. + // + // Refer: + // https://golang.org/src/net/http/transport.go?h=roundTrip#L1843 + DisableCompression: true, +} diff --git a/vendor/github.com/minio/minio-go/utils.go b/vendor/github.com/minio/minio-go/utils.go new file mode 100644 index 0000000..8483f38 --- /dev/null +++ b/vendor/github.com/minio/minio-go/utils.go @@ -0,0 +1,272 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "crypto/md5" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "encoding/xml" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "regexp" + "strings" + "time" + + "github.com/minio/minio-go/pkg/s3utils" +) + +// xmlDecoder provide decoded value in xml. +func xmlDecoder(body io.Reader, v interface{}) error { + d := xml.NewDecoder(body) + return d.Decode(v) +} + +// sum256 calculate sha256sum for an input byte array, returns hex encoded. +func sum256Hex(data []byte) string { + hash := sha256.New() + hash.Write(data) + return hex.EncodeToString(hash.Sum(nil)) +} + +// sumMD5Base64 calculate md5sum for an input byte array, returns base64 encoded. +func sumMD5Base64(data []byte) string { + hash := md5.New() + hash.Write(data) + return base64.StdEncoding.EncodeToString(hash.Sum(nil)) +} + +// getEndpointURL - construct a new endpoint. +func getEndpointURL(endpoint string, secure bool) (*url.URL, error) { + if strings.Contains(endpoint, ":") { + host, _, err := net.SplitHostPort(endpoint) + if err != nil { + return nil, err + } + if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) { + msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards." + return nil, ErrInvalidArgument(msg) + } + } else { + if !s3utils.IsValidIP(endpoint) && !s3utils.IsValidDomain(endpoint) { + msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards." + return nil, ErrInvalidArgument(msg) + } + } + // If secure is false, use 'http' scheme. + scheme := "https" + if !secure { + scheme = "http" + } + + // Construct a secured endpoint URL. + endpointURLStr := scheme + "://" + endpoint + endpointURL, err := url.Parse(endpointURLStr) + if err != nil { + return nil, err + } + + // Validate incoming endpoint URL. + if err := isValidEndpointURL(*endpointURL); err != nil { + return nil, err + } + return endpointURL, nil +} + +// closeResponse close non nil response with any response Body. +// convenient wrapper to drain any remaining data on response body. +// +// Subsequently this allows golang http RoundTripper +// to re-use the same connection for future requests. +func closeResponse(resp *http.Response) { + // Callers should close resp.Body when done reading from it. + // If resp.Body is not closed, the Client's underlying RoundTripper + // (typically Transport) may not be able to re-use a persistent TCP + // connection to the server for a subsequent "keep-alive" request. + if resp != nil && resp.Body != nil { + // Drain any remaining Body and then close the connection. + // Without this closing connection would disallow re-using + // the same connection for future uses. + // - http://stackoverflow.com/a/17961593/4465767 + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + } +} + +var ( + // Hex encoded string of nil sha256sum bytes. + emptySHA256Hex = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + + // Sentinel URL is the default url value which is invalid. + sentinelURL = url.URL{} +) + +// Verify if input endpoint URL is valid. 
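The drain-before-close pattern above is worth lifting out; a minimal standalone sketch (the function name is hypothetical):

    package main

    import (
        "io"
        "io/ioutil"
        "net/http"
    )

    // drainAndClose mirrors closeResponse: fully drain the body before
    // closing so the transport can reuse the keep-alive connection.
    func drainAndClose(resp *http.Response) {
        if resp != nil && resp.Body != nil {
            io.Copy(ioutil.Discard, resp.Body)
            resp.Body.Close()
        }
    }

    func main() {
        resp, err := http.Get("http://example.com/")
        if err != nil {
            return
        }
        defer drainAndClose(resp)
        // ... read what is needed from resp.Body ...
    }

isValidEndpointURL, introduced by the comment above, follows.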
+func isValidEndpointURL(endpointURL url.URL) error {
+    if endpointURL == sentinelURL {
+        return ErrInvalidArgument("Endpoint url cannot be empty.")
+    }
+    if endpointURL.Path != "/" && endpointURL.Path != "" {
+        return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
+    }
+    if strings.Contains(endpointURL.Host, ".s3.amazonaws.com") {
+        if !s3utils.IsAmazonEndpoint(endpointURL) {
+            return ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
+        }
+    }
+    if strings.Contains(endpointURL.Host, ".googleapis.com") {
+        if !s3utils.IsGoogleEndpoint(endpointURL) {
+            return ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
+        }
+    }
+    return nil
+}
+
+// Verify if input expires value is valid.
+func isValidExpiry(expires time.Duration) error {
+    expireSeconds := int64(expires / time.Second)
+    if expireSeconds < 1 {
+        return ErrInvalidArgument("Expires cannot be lesser than 1 second.")
+    }
+    if expireSeconds > 604800 {
+        return ErrInvalidArgument("Expires cannot be greater than 7 days.")
+    }
+    return nil
+}
+
+// make a copy of http.Header
+func cloneHeader(h http.Header) http.Header {
+    h2 := make(http.Header, len(h))
+    for k, vv := range h {
+        vv2 := make([]string, len(vv))
+        copy(vv2, vv)
+        h2[k] = vv2
+    }
+    return h2
+}
+
+// Filter relevant response headers from
+// the HEAD, GET http response. The function takes
+// a list of headers which are filtered out and
+// returned as a new http header.
+func filterHeader(header http.Header, filterKeys []string) (filteredHeader http.Header) {
+    filteredHeader = cloneHeader(header)
+    for _, key := range filterKeys {
+        filteredHeader.Del(key)
+    }
+    return filteredHeader
+}
+
+// regCred matches credential string in HTTP header
+var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")
+
+// regCred matches signature string in HTTP header
+var regSign = regexp.MustCompile("Signature=([[0-9a-f]+)")
+
+// Redact out signature value from authorization string.
+func redactSignature(origAuth string) string {
+    if !strings.HasPrefix(origAuth, signV4Algorithm) {
+        // Set a temporary redacted auth
+        return "AWS **REDACTED**:**REDACTED**"
+    }
+
+    /// Signature V4 authorization header.
+
+    // Strip out accessKeyID from:
+    // Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
+    newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
+
+    // Strip out 256-bit signature from: Signature=<256-bit signature>
+    return regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
+}
+
+// Get default location returns the location based on the input
+// URL `u`, if region override is provided then all location
+// defaults to regionOverride.
+//
+// If no other cases match then the location is set to `us-east-1`
+// as a last resort.
+func getDefaultLocation(u url.URL, regionOverride string) (location string) {
+    if regionOverride != "" {
+        return regionOverride
+    }
+    region := s3utils.GetRegionFromURL(u)
+    if region == "" {
+        region = "us-east-1"
+    }
+    return region
+}
+
+var supportedHeaders = []string{
+    "content-type",
+    "cache-control",
+    "content-encoding",
+    "content-disposition",
+    "content-language",
+    "x-amz-website-redirect-location",
+    "expires",
+    // Add more supported headers here.
+} + +// isStorageClassHeader returns true if the header is a supported storage class header +func isStorageClassHeader(headerKey string) bool { + return strings.ToLower(amzStorageClass) == strings.ToLower(headerKey) +} + +// isStandardHeader returns true if header is a supported header and not a custom header +func isStandardHeader(headerKey string) bool { + key := strings.ToLower(headerKey) + for _, header := range supportedHeaders { + if strings.ToLower(header) == key { + return true + } + } + return false +} + +// sseHeaders is list of server side encryption headers +var sseHeaders = []string{ + "x-amz-server-side-encryption", + "x-amz-server-side-encryption-aws-kms-key-id", + "x-amz-server-side-encryption-context", + "x-amz-server-side-encryption-customer-algorithm", + "x-amz-server-side-encryption-customer-key", + "x-amz-server-side-encryption-customer-key-MD5", +} + +// isSSEHeader returns true if header is a server side encryption header. +func isSSEHeader(headerKey string) bool { + key := strings.ToLower(headerKey) + for _, h := range sseHeaders { + if strings.ToLower(h) == key { + return true + } + } + return false +} + +// isAmzHeader returns true if header is a x-amz-meta-* or x-amz-acl header. +func isAmzHeader(headerKey string) bool { + key := strings.ToLower(headerKey) + + return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) +} diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE new file mode 100644 index 0000000..f9c841a --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md new file mode 100644 index 0000000..d70706d --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/README.md @@ -0,0 +1,14 @@ +# go-homedir + +This is a Go library for detecting the user's home directory without +the use of cgo, so the library can be used in cross-compilation environments. + +Usage is incredibly simple, just call `homedir.Dir()` to get the home directory +for a user, and `homedir.Expand()` to expand the `~` in a path to the home +directory. + +**Why not just use `os/user`?** The built-in `os/user` package requires +cgo on Darwin systems. This means that any Go code that uses that package +cannot cross compile. 
But 99% of the time the use for `os/user` is just to +retrieve the home directory, which we can do for the current user without +cgo. This library does that, enabling cross-compilation. diff --git a/vendor/github.com/mitchellh/go-homedir/go.mod b/vendor/github.com/mitchellh/go-homedir/go.mod new file mode 100644 index 0000000..7efa09a --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/go-homedir diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go new file mode 100644 index 0000000..2537853 --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/homedir.go @@ -0,0 +1,167 @@ +package homedir + +import ( + "bytes" + "errors" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" +) + +// DisableCache will disable caching of the home directory. Caching is enabled +// by default. +var DisableCache bool + +var homedirCache string +var cacheLock sync.RWMutex + +// Dir returns the home directory for the executing user. +// +// This uses an OS-specific method for discovering the home directory. +// An error is returned if a home directory cannot be detected. +func Dir() (string, error) { + if !DisableCache { + cacheLock.RLock() + cached := homedirCache + cacheLock.RUnlock() + if cached != "" { + return cached, nil + } + } + + cacheLock.Lock() + defer cacheLock.Unlock() + + var result string + var err error + if runtime.GOOS == "windows" { + result, err = dirWindows() + } else { + // Unix-like system, so just assume Unix + result, err = dirUnix() + } + + if err != nil { + return "", err + } + homedirCache = result + return result, nil +} + +// Expand expands the path to include the home directory if the path +// is prefixed with `~`. If it isn't prefixed with `~`, the path is +// returned as-is. +func Expand(path string) (string, error) { + if len(path) == 0 { + return path, nil + } + + if path[0] != '~' { + return path, nil + } + + if len(path) > 1 && path[1] != '/' && path[1] != '\\' { + return "", errors.New("cannot expand user-specific home dir") + } + + dir, err := Dir() + if err != nil { + return "", err + } + + return filepath.Join(dir, path[1:]), nil +} + +// Reset clears the cache, forcing the next call to Dir to re-detect +// the home directory. This generally never has to be called, but can be +// useful in tests if you're modifying the home directory via the HOME +// env var or something. +func Reset() { + cacheLock.Lock() + defer cacheLock.Unlock() + homedirCache = "" +} + +func dirUnix() (string, error) { + homeEnv := "HOME" + if runtime.GOOS == "plan9" { + // On plan9, env vars are lowercase. + homeEnv = "home" + } + + // First prefer the HOME environmental variable + if home := os.Getenv(homeEnv); home != "" { + return home, nil + } + + var stdout bytes.Buffer + + // If that fails, try OS specific commands + if runtime.GOOS == "darwin" { + cmd := exec.Command("sh", "-c", `dscl -q . -read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`) + cmd.Stdout = &stdout + if err := cmd.Run(); err == nil { + result := strings.TrimSpace(stdout.String()) + if result != "" { + return result, nil + } + } + } else { + cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + // If the error is ErrNotFound, we ignore it. Otherwise, return it. 
+ if err != exec.ErrNotFound { + return "", err + } + } else { + if passwd := strings.TrimSpace(stdout.String()); passwd != "" { + // username:password:uid:gid:gecos:home:shell + passwdParts := strings.SplitN(passwd, ":", 7) + if len(passwdParts) > 5 { + return passwdParts[5], nil + } + } + } + } + + // If all else fails, try the shell + stdout.Reset() + cmd := exec.Command("sh", "-c", "cd && pwd") + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + return "", err + } + + result := strings.TrimSpace(stdout.String()) + if result == "" { + return "", errors.New("blank output when reading home directory") + } + + return result, nil +} + +func dirWindows() (string, error) { + // First prefer the HOME environmental variable + if home := os.Getenv("HOME"); home != "" { + return home, nil + } + + // Prefer standard environment variable USERPROFILE + if home := os.Getenv("USERPROFILE"); home != "" { + return home, nil + } + + drive := os.Getenv("HOMEDRIVE") + path := os.Getenv("HOMEPATH") + home := drive + path + if drive == "" || path == "" { + return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank") + } + + return home, nil +} diff --git a/vendor/github.com/pborman/uuid/.travis.yml b/vendor/github.com/pborman/uuid/.travis.yml new file mode 100644 index 0000000..3deb4a1 --- /dev/null +++ b/vendor/github.com/pborman/uuid/.travis.yml @@ -0,0 +1,10 @@ +language: go + +go: + - "1.9" + - "1.10" + - "1.11" + - tip + +script: + - go test -v ./... diff --git a/vendor/github.com/pborman/uuid/CONTRIBUTING.md b/vendor/github.com/pborman/uuid/CONTRIBUTING.md new file mode 100644 index 0000000..04fdf09 --- /dev/null +++ b/vendor/github.com/pborman/uuid/CONTRIBUTING.md @@ -0,0 +1,10 @@ +# How to contribute + +We definitely welcome patches and contribution to this project! + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +You may have already signed it for other Google projects. diff --git a/vendor/github.com/pborman/uuid/CONTRIBUTORS b/vendor/github.com/pborman/uuid/CONTRIBUTORS new file mode 100644 index 0000000..b382a04 --- /dev/null +++ b/vendor/github.com/pborman/uuid/CONTRIBUTORS @@ -0,0 +1 @@ +Paul Borman diff --git a/vendor/github.com/pborman/uuid/LICENSE b/vendor/github.com/pborman/uuid/LICENSE new file mode 100644 index 0000000..5dc6826 --- /dev/null +++ b/vendor/github.com/pborman/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pborman/uuid/README.md b/vendor/github.com/pborman/uuid/README.md new file mode 100644 index 0000000..810ad40 --- /dev/null +++ b/vendor/github.com/pborman/uuid/README.md @@ -0,0 +1,15 @@ +This project was automatically exported from code.google.com/p/go-uuid + +# uuid ![build status](https://travis-ci.org/pborman/uuid.svg?branch=master) +The uuid package generates and inspects UUIDs based on [RFC 4122](http://tools.ietf.org/html/rfc4122) and DCE 1.1: Authentication and Security Services. + +This package now leverages the github.com/google/uuid package (which is based off an earlier version of this package). + +###### Install +`go get github.com/pborman/uuid` + +###### Documentation +[![GoDoc](https://godoc.org/github.com/pborman/uuid?status.svg)](http://godoc.org/github.com/pborman/uuid) + +Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here: +http://godoc.org/github.com/pborman/uuid diff --git a/vendor/github.com/pborman/uuid/dce.go b/vendor/github.com/pborman/uuid/dce.go new file mode 100644 index 0000000..50a0f2d --- /dev/null +++ b/vendor/github.com/pborman/uuid/dce.go @@ -0,0 +1,84 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) UUID { + uuid := NewUUID() + if uuid != nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCEPerson(Person, uint32(os.Getuid())) +func NewDCEPerson() UUID { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCEGroup(Group, uint32(os.Getgid())) +func NewDCEGroup() UUID { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID or false. 
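A hypothetical sketch of the DCE Security constructors above (the pborman/uuid import path is taken from this vendor tree):

    package main

    import (
        "fmt"

        "github.com/pborman/uuid"
    )

    func main() {
        // Version 2 UUID for the current user: Person domain, id = os.Getuid().
        u := uuid.NewDCEPerson()
        fmt.Println(u)

        if d, ok := u.Domain(); ok {
            fmt.Println("domain:", d) // Person
        }
        if id, ok := u.Id(); ok {
            fmt.Println("uid:", id)
        }
    }

The Domain accessor flagged by the comment above is defined next.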
+func (uuid UUID) Domain() (Domain, bool) { + if v, _ := uuid.Version(); v != 2 { + return 0, false + } + return Domain(uuid[9]), true +} + +// Id returns the id for a Version 2 UUID or false. +func (uuid UUID) Id() (uint32, bool) { + if v, _ := uuid.Version(); v != 2 { + return 0, false + } + return binary.BigEndian.Uint32(uuid[0:4]), true +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/pborman/uuid/doc.go b/vendor/github.com/pborman/uuid/doc.go new file mode 100644 index 0000000..727d761 --- /dev/null +++ b/vendor/github.com/pborman/uuid/doc.go @@ -0,0 +1,13 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The uuid package generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// This package is a partial wrapper around the github.com/google/uuid package. +// This package represents a UUID as []byte while github.com/google/uuid +// represents a UUID as [16]byte. +package uuid diff --git a/vendor/github.com/pborman/uuid/go.mod b/vendor/github.com/pborman/uuid/go.mod new file mode 100644 index 0000000..099fc7d --- /dev/null +++ b/vendor/github.com/pborman/uuid/go.mod @@ -0,0 +1,3 @@ +module github.com/pborman/uuid + +require github.com/google/uuid v1.0.0 diff --git a/vendor/github.com/pborman/uuid/go.sum b/vendor/github.com/pborman/uuid/go.sum new file mode 100644 index 0000000..db2574a --- /dev/null +++ b/vendor/github.com/pborman/uuid/go.sum @@ -0,0 +1,2 @@ +github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= diff --git a/vendor/github.com/pborman/uuid/hash.go b/vendor/github.com/pborman/uuid/hash.go new file mode 100644 index 0000000..a0420c1 --- /dev/null +++ b/vendor/github.com/pborman/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known Name Space IDs and UUIDs +var ( + NameSpace_DNS = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + NameSpace_URL = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8") + NameSpace_OID = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8") + NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8") + NIL = Parse("00000000-0000-0000-0000-000000000000") +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space) + h.Write([]byte(data)) + s := h.Sum(nil) + uuid := make([]byte, 16) + copy(uuid, s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. 
+// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/pborman/uuid/marshal.go b/vendor/github.com/pborman/uuid/marshal.go new file mode 100644 index 0000000..35b8935 --- /dev/null +++ b/vendor/github.com/pborman/uuid/marshal.go @@ -0,0 +1,85 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "errors" + "fmt" + + guuid "github.com/google/uuid" +) + +// MarshalText implements encoding.TextMarshaler. +func (u UUID) MarshalText() ([]byte, error) { + if len(u) != 16 { + return nil, nil + } + var js [36]byte + encodeHex(js[:], u) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (u *UUID) UnmarshalText(data []byte) error { + if len(data) == 0 { + return nil + } + id := Parse(string(data)) + if id == nil { + return errors.New("invalid UUID") + } + *u = id + return nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (u UUID) MarshalBinary() ([]byte, error) { + return u[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (u *UUID) UnmarshalBinary(data []byte) error { + if len(data) == 0 { + return nil + } + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + var id [16]byte + copy(id[:], data) + *u = id[:] + return nil +} + +// MarshalText implements encoding.TextMarshaler. +func (u Array) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], u[:]) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (u *Array) UnmarshalText(data []byte) error { + id, err := guuid.ParseBytes(data) + if err != nil { + return err + } + *u = Array(id) + return nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (u Array) MarshalBinary() ([]byte, error) { + return u[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (u *Array) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(u[:], data) + return nil +} diff --git a/vendor/github.com/pborman/uuid/node.go b/vendor/github.com/pborman/uuid/node.go new file mode 100644 index 0000000..e524e01 --- /dev/null +++ b/vendor/github.com/pborman/uuid/node.go @@ -0,0 +1,50 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + guuid "github.com/google/uuid" +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + return guuid.NodeInterface() +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". 
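Name-based UUIDs from NewMD5 and NewSHA1 above are deterministic: the same namespace and name always produce the same UUID. A small sketch (hypothetical usage):

    package main

    import (
        "fmt"

        "github.com/pborman/uuid"
    )

    func main() {
        u3 := uuid.NewMD5(uuid.NameSpace_DNS, []byte("www.example.com"))
        u5 := uuid.NewSHA1(uuid.NameSpace_DNS, []byte("www.example.com"))
        fmt.Println(u3) // version 3 (MD5)
        fmt.Println(u5) // version 5 (SHA1)
    }

SetNodeInterface, documented above, follows.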
+func SetNodeInterface(name string) bool { + return guuid.SetNodeInterface(name) +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + return guuid.NodeID() +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + return guuid.SetNodeID(id) +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + if len(uuid) != 16 { + return nil + } + node := make([]byte, 6) + copy(node, uuid[10:]) + return node +} diff --git a/vendor/github.com/pborman/uuid/sql.go b/vendor/github.com/pborman/uuid/sql.go new file mode 100644 index 0000000..929c384 --- /dev/null +++ b/vendor/github.com/pborman/uuid/sql.go @@ -0,0 +1,68 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "errors" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. +func (uuid *UUID) Scan(src interface{}) error { + switch src.(type) { + case string: + // if an empty UUID comes from a table, we return a null UUID + if src.(string) == "" { + return nil + } + + // see uuid.Parse for required string format + parsed := Parse(src.(string)) + + if parsed == nil { + return errors.New("Scan: invalid UUID format") + } + + *uuid = parsed + case []byte: + b := src.([]byte) + + // if an empty UUID comes from a table, we return a null UUID + if len(b) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(b) == 16 { + parsed := make([]byte, 16) + copy(parsed, b) + *uuid = UUID(parsed) + } else { + u := Parse(string(b)) + + if u == nil { + return errors.New("Scan: invalid UUID format") + } + + *uuid = u + } + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. +func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/vendor/github.com/pborman/uuid/time.go b/vendor/github.com/pborman/uuid/time.go new file mode 100644 index 0000000..5c0960d --- /dev/null +++ b/vendor/github.com/pborman/uuid/time.go @@ -0,0 +1,57 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + + guuid "github.com/google/uuid" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. +type Time = guuid.Time + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. 
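The Scan and Value methods in sql.go let a UUID pass through database/sql without manual conversion. A sketch that exercises both ends directly, without a live database, to show the round trip a driver would perform:

```go
package main

import (
	"fmt"

	"github.com/pborman/uuid"
)

func main() {
	id := uuid.NewRandom()

	// Value is what a database/sql driver receives: the string form.
	v, err := id.Value()
	if err != nil {
		panic(err)
	}
	fmt.Printf("driver value: %v\n", v)

	// Scan accepts string or []byte as returned by a driver; a 16-byte
	// slice is taken as raw UUID bytes, anything else is parsed as text.
	var decoded uuid.UUID
	if err := decoded.Scan(v); err != nil {
		panic(err)
	}
	fmt.Println(uuid.Equal(id, decoded)) // true
}
```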
+func GetTime() (Time, uint16, error) { return guuid.GetTime() }
+
+// ClockSequence returns the current clock sequence, generating one if not
+// already set. The clock sequence is only used for Version 1 UUIDs.
+//
+// The uuid package does not use global static storage for the clock sequence or
+// the last time a UUID was generated. Unless SetClockSequence is used, a new
+// random clock sequence is generated the first time a clock sequence is
+// requested by ClockSequence, GetTime, or NewUUID. (Section 4.2.1.1 of RFC 4122.)
+func ClockSequence() int { return guuid.ClockSequence() }
+
+// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
+// -1 causes a new sequence to be generated.
+func SetClockSequence(seq int) { guuid.SetClockSequence(seq) }
+
+// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
+// uuid. It returns false if uuid is not valid. The time is only well defined
+// for version 1 and 2 UUIDs.
+func (uuid UUID) Time() (Time, bool) {
+	if len(uuid) != 16 {
+		return 0, false
+	}
+	time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+	time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+	time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+	return Time(time), true
+}
+
+// ClockSequence returns the clock sequence encoded in uuid. It returns false
+// if uuid is not valid. The clock sequence is only well defined for version 1
+// and 2 UUIDs.
+func (uuid UUID) ClockSequence() (int, bool) {
+	if len(uuid) != 16 {
+		return 0, false
+	}
+	return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true
+}
diff --git a/vendor/github.com/pborman/uuid/util.go b/vendor/github.com/pborman/uuid/util.go
new file mode 100644
index 0000000..255b5e2
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/util.go
@@ -0,0 +1,32 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+// xvalues returns the value of a byte as a hexadecimal digit or 255.
+var xvalues = [256]byte{
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
+	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+}
+
+// xtob converts the first two hex bytes of x into a byte.
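Since Time above is an alias of github.com/google/uuid's Time, its UnixTime method (from that package) converts the 100ns Gregorian tick count extracted from a Version 1 UUID back to the Unix epoch. A sketch, assuming a Version 1 UUID can be generated on the host:

```go
package main

import (
	"fmt"
	"time"

	"github.com/pborman/uuid"
)

func main() {
	id := uuid.NewUUID() // Version 1: embeds a timestamp and node ID
	if id == nil {
		panic("could not generate a version 1 UUID")
	}

	t, ok := id.Time() // 100ns ticks since 15 Oct 1582
	if !ok {
		panic("not a time-bearing UUID")
	}

	// UnixTime converts the Gregorian tick count to Unix seconds/nanoseconds.
	sec, nsec := t.UnixTime()
	fmt.Println(id, time.Unix(sec, nsec).UTC())
}
```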
+func xtob(x string) (byte, bool) { + b1 := xvalues[x[0]] + b2 := xvalues[x[1]] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/pborman/uuid/uuid.go b/vendor/github.com/pborman/uuid/uuid.go new file mode 100644 index 0000000..3370004 --- /dev/null +++ b/vendor/github.com/pborman/uuid/uuid.go @@ -0,0 +1,162 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "io" + + guuid "github.com/google/uuid" +) + +// Array is a pass-by-value UUID that can be used as an effecient key in a map. +type Array [16]byte + +// UUID converts uuid into a slice. +func (uuid Array) UUID() UUID { + return uuid[:] +} + +// String returns the string representation of uuid, +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. +func (uuid Array) String() string { + return guuid.UUID(uuid).String() +} + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID []byte + +// A Version represents a UUIDs version. +type Version = guuid.Version + +// A Variant represents a UUIDs variant. +type Variant = guuid.Variant + +// Constants returned by Variant. +const ( + Invalid = guuid.Invalid // Invalid UUID + RFC4122 = guuid.RFC4122 // The variant specified in RFC4122 + Reserved = guuid.Reserved // Reserved, NCS backward compatibility. + Microsoft = guuid.Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future = guuid.Future // Reserved for future definition. +) + +var rander = rand.Reader // random function + +// New returns a new random (version 4) UUID as a string. It is a convenience +// function for NewRandom().String(). +func New() string { + return NewRandom().String() +} + +// Parse decodes s into a UUID or returns nil. See github.com/google/uuid for +// the formats parsed. +func Parse(s string) UUID { + gu, err := guuid.Parse(s) + if err == nil { + return gu[:] + } + return nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. +func ParseBytes(b []byte) (UUID, error) { + gu, err := guuid.ParseBytes(b) + if err == nil { + return gu[:], nil + } + return nil, err +} + +// Equal returns true if uuid1 and uuid2 are equal. +func Equal(uuid1, uuid2 UUID) bool { + return bytes.Equal(uuid1, uuid2) +} + +// Array returns an array representation of uuid that can be used as a map key. +// Array panics if uuid is not valid. +func (uuid UUID) Array() Array { + if len(uuid) != 16 { + panic("invalid uuid") + } + var a Array + copy(a[:], uuid) + return a +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + if len(uuid) != 16 { + return "" + } + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. 
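Array exists because the []byte-backed UUID type cannot be used as a map key, while the comparable [16]byte form can. A sketch of the intended pattern:

```go
package main

import (
	"fmt"

	"github.com/pborman/uuid"
)

func main() {
	// UUID is a []byte, which Go does not allow as a map key;
	// Array ([16]byte) is comparable, so it can index a map.
	names := map[uuid.Array]string{}

	id := uuid.NewRandom()
	names[id.Array()] = "alice" // Array panics if id is not 16 bytes

	// The same UUID always yields the same Array, so lookups round-trip,
	// and Array.UUID converts back to the slice form.
	fmt.Println(names[id.Array()])                          // alice
	fmt.Println(id.Array().UUID().String() == id.String())  // true
}
```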
+func (uuid UUID) URN() string {
+	if len(uuid) != 16 {
+		return ""
+	}
+	var buf [36 + 9]byte
+	copy(buf[:], "urn:uuid:")
+	encodeHex(buf[9:], uuid)
+	return string(buf[:])
+}
+
+func encodeHex(dst []byte, uuid UUID) {
+	hex.Encode(dst[:], uuid[:4])
+	dst[8] = '-'
+	hex.Encode(dst[9:13], uuid[4:6])
+	dst[13] = '-'
+	hex.Encode(dst[14:18], uuid[6:8])
+	dst[18] = '-'
+	hex.Encode(dst[19:23], uuid[8:10])
+	dst[23] = '-'
+	hex.Encode(dst[24:], uuid[10:])
+}
+
+// Variant returns the variant encoded in uuid. It returns Invalid if
+// uuid is invalid.
+func (uuid UUID) Variant() Variant {
+	if len(uuid) != 16 {
+		return Invalid
+	}
+	switch {
+	case (uuid[8] & 0xc0) == 0x80:
+		return RFC4122
+	case (uuid[8] & 0xe0) == 0xc0:
+		return Microsoft
+	case (uuid[8] & 0xe0) == 0xe0:
+		return Future
+	default:
+		return Reserved
+	}
+}
+
+// Version returns the version of uuid. It returns false if uuid is not
+// valid.
+func (uuid UUID) Version() (Version, bool) {
+	if len(uuid) != 16 {
+		return 0, false
+	}
+	return Version(uuid[6] >> 4), true
+}
+
+// SetRand sets the random number generator to r, which implements io.Reader.
+// If r.Read returns an error when the package requests random data then
+// a panic will be issued.
+//
+// Calling SetRand with nil sets the random number generator to the default
+// generator.
+func SetRand(r io.Reader) {
+	guuid.SetRand(r)
+}
diff --git a/vendor/github.com/pborman/uuid/version1.go b/vendor/github.com/pborman/uuid/version1.go
new file mode 100644
index 0000000..7af948d
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/version1.go
@@ -0,0 +1,23 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	guuid "github.com/google/uuid"
+)
+
+// NewUUID returns a Version 1 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set NewUUID returns nil. If clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current time, NewUUID returns nil.
+func NewUUID() UUID {
+	gu, err := guuid.NewUUID()
+	if err == nil {
+		return UUID(gu[:])
+	}
+	return nil
+}
diff --git a/vendor/github.com/pborman/uuid/version4.go b/vendor/github.com/pborman/uuid/version4.go
new file mode 100644
index 0000000..b459d46
--- /dev/null
+++ b/vendor/github.com/pborman/uuid/version4.go
@@ -0,0 +1,26 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import guuid "github.com/google/uuid"
+
+// NewRandom returns a Random (Version 4) UUID, or nil if it fails to read
+// enough random data.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
+//
+//  Randomly generated UUIDs have 122 random bits. One's annual risk of being
+//  hit by a meteorite is estimated to be one chance in 17 billion, that
+//  means the probability is about 0.00000000006 (6 × 10⁻¹¹),
+//  equivalent to the odds of creating a few tens of trillions of UUIDs in a
+//  year and having one duplicate.
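SetRand is mainly useful for reproducible test fixtures. A sketch, assuming a seeded math/rand source as the reader (deterministic and not cryptographically secure, so never for production IDs):

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/pborman/uuid"
)

func main() {
	// A seeded math/rand source is deterministic: the same seed always
	// produces the same sequence of UUIDs. Test fixtures only.
	uuid.SetRand(rand.New(rand.NewSource(1)))
	fmt.Println(uuid.NewRandom()) // identical on every run

	// Restore the default crypto/rand-backed source.
	uuid.SetRand(nil)
}
```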
+func NewRandom() UUID { + if gu, err := guuid.NewRandom(); err == nil { + return UUID(gu[:]) + } + return nil +} diff --git a/vendor/github.com/sergi/go-diff/AUTHORS b/vendor/github.com/sergi/go-diff/AUTHORS new file mode 100644 index 0000000..2d7bb2b --- /dev/null +++ b/vendor/github.com/sergi/go-diff/AUTHORS @@ -0,0 +1,25 @@ +# This is the official list of go-diff authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Danny Yoo +James Kolb +Jonathan Amsterdam +Markus Zimmermann +Matt Kovars +Örjan Persson +Osman Masood +Robert Carlsen +Rory Flynn +Sergi Mansilla +Shatrugna Sadhu +Shawn Smith +Stas Maksimov +Tor Arvid Lund +Zac Bergquist diff --git a/vendor/github.com/sergi/go-diff/CONTRIBUTORS b/vendor/github.com/sergi/go-diff/CONTRIBUTORS new file mode 100644 index 0000000..369e3d5 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/CONTRIBUTORS @@ -0,0 +1,32 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the go-diff +# repository. +# +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, ACME Inc. employees would be listed here +# but not in AUTHORS, because ACME Inc. would hold the copyright. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file. +# +# Names should be added to this file like so: +# Name +# +# Please keep the list sorted. + +Danny Yoo +James Kolb +Jonathan Amsterdam +Markus Zimmermann +Matt Kovars +Örjan Persson +Osman Masood +Robert Carlsen +Rory Flynn +Sergi Mansilla +Shatrugna Sadhu +Shawn Smith +Stas Maksimov +Tor Arvid Lund +Zac Bergquist diff --git a/vendor/github.com/sergi/go-diff/LICENSE b/vendor/github.com/sergi/go-diff/LICENSE new file mode 100644 index 0000000..937942c --- /dev/null +++ b/vendor/github.com/sergi/go-diff/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2012-2016 The go-diff Authors. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go new file mode 100644 index 0000000..82ad7bc --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go @@ -0,0 +1,1344 @@ +// Copyright (c) 2012-2016 The go-diff authors. 
All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "bytes" + "errors" + "fmt" + "html" + "math" + "net/url" + "regexp" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// Operation defines the operation of a diff item. +type Operation int8 + +const ( + // DiffDelete item represents a delete diff. + DiffDelete Operation = -1 + // DiffInsert item represents an insert diff. + DiffInsert Operation = 1 + // DiffEqual item represents an equal diff. + DiffEqual Operation = 0 +) + +// Diff represents one diff operation +type Diff struct { + Type Operation + Text string +} + +func splice(slice []Diff, index int, amount int, elements ...Diff) []Diff { + return append(slice[:index], append(elements, slice[index+amount:]...)...) +} + +// DiffMain finds the differences between two texts. +// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +func (dmp *DiffMatchPatch) DiffMain(text1, text2 string, checklines bool) []Diff { + return dmp.DiffMainRunes([]rune(text1), []rune(text2), checklines) +} + +// DiffMainRunes finds the differences between two rune sequences. +// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +func (dmp *DiffMatchPatch) DiffMainRunes(text1, text2 []rune, checklines bool) []Diff { + var deadline time.Time + if dmp.DiffTimeout > 0 { + deadline = time.Now().Add(dmp.DiffTimeout) + } + return dmp.diffMainRunes(text1, text2, checklines, deadline) +} + +func (dmp *DiffMatchPatch) diffMainRunes(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { + if runesEqual(text1, text2) { + var diffs []Diff + if len(text1) > 0 { + diffs = append(diffs, Diff{DiffEqual, string(text1)}) + } + return diffs + } + // Trim off common prefix (speedup). + commonlength := commonPrefixLength(text1, text2) + commonprefix := text1[:commonlength] + text1 = text1[commonlength:] + text2 = text2[commonlength:] + + // Trim off common suffix (speedup). + commonlength = commonSuffixLength(text1, text2) + commonsuffix := text1[len(text1)-commonlength:] + text1 = text1[:len(text1)-commonlength] + text2 = text2[:len(text2)-commonlength] + + // Compute the diff on the middle block. + diffs := dmp.diffCompute(text1, text2, checklines, deadline) + + // Restore the prefix and suffix. + if len(commonprefix) != 0 { + diffs = append([]Diff{Diff{DiffEqual, string(commonprefix)}}, diffs...) + } + if len(commonsuffix) != 0 { + diffs = append(diffs, Diff{DiffEqual, string(commonsuffix)}) + } + + return dmp.DiffCleanupMerge(diffs) +} + +// diffCompute finds the differences between two rune slices. Assumes that the texts do not have any common prefix or suffix. +func (dmp *DiffMatchPatch) diffCompute(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { + diffs := []Diff{} + if len(text1) == 0 { + // Just add some text (speedup). + return append(diffs, Diff{DiffInsert, string(text2)}) + } else if len(text2) == 0 { + // Just delete some text (speedup). 
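Typical entry-point usage of the diff API vendored above, with illustrative strings only:

```go
package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()

	// checklines=false requests a pure character-level diff.
	diffs := dmp.DiffMain("the quick brown fox", "the quick red fox", false)
	for _, d := range diffs {
		fmt.Printf("%v %q\n", d.Type, d.Text)
	}

	// DiffText1/DiffText2 reassemble the source and destination texts.
	fmt.Println(dmp.DiffText1(diffs)) // the quick brown fox
	fmt.Println(dmp.DiffText2(diffs)) // the quick red fox
}
```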
+ return append(diffs, Diff{DiffDelete, string(text1)}) + } + + var longtext, shorttext []rune + if len(text1) > len(text2) { + longtext = text1 + shorttext = text2 + } else { + longtext = text2 + shorttext = text1 + } + + if i := runesIndex(longtext, shorttext); i != -1 { + op := DiffInsert + // Swap insertions for deletions if diff is reversed. + if len(text1) > len(text2) { + op = DiffDelete + } + // Shorter text is inside the longer text (speedup). + return []Diff{ + Diff{op, string(longtext[:i])}, + Diff{DiffEqual, string(shorttext)}, + Diff{op, string(longtext[i+len(shorttext):])}, + } + } else if len(shorttext) == 1 { + // Single character string. + // After the previous speedup, the character can't be an equality. + return []Diff{ + Diff{DiffDelete, string(text1)}, + Diff{DiffInsert, string(text2)}, + } + // Check to see if the problem can be split in two. + } else if hm := dmp.diffHalfMatch(text1, text2); hm != nil { + // A half-match was found, sort out the return data. + text1A := hm[0] + text1B := hm[1] + text2A := hm[2] + text2B := hm[3] + midCommon := hm[4] + // Send both pairs off for separate processing. + diffsA := dmp.diffMainRunes(text1A, text2A, checklines, deadline) + diffsB := dmp.diffMainRunes(text1B, text2B, checklines, deadline) + // Merge the results. + return append(diffsA, append([]Diff{Diff{DiffEqual, string(midCommon)}}, diffsB...)...) + } else if checklines && len(text1) > 100 && len(text2) > 100 { + return dmp.diffLineMode(text1, text2, deadline) + } + return dmp.diffBisect(text1, text2, deadline) +} + +// diffLineMode does a quick line-level diff on both []runes, then rediff the parts for greater accuracy. This speedup can produce non-minimal diffs. +func (dmp *DiffMatchPatch) diffLineMode(text1, text2 []rune, deadline time.Time) []Diff { + // Scan the text on a line-by-line basis first. + text1, text2, linearray := dmp.diffLinesToRunes(text1, text2) + + diffs := dmp.diffMainRunes(text1, text2, false, deadline) + + // Convert the diff back to original text. + diffs = dmp.DiffCharsToLines(diffs, linearray) + // Eliminate freak matches (e.g. blank lines) + diffs = dmp.DiffCleanupSemantic(diffs) + + // Rediff any replacement blocks, this time character-by-character. + // Add a dummy entry at the end. + diffs = append(diffs, Diff{DiffEqual, ""}) + + pointer := 0 + countDelete := 0 + countInsert := 0 + + // NOTE: Rune slices are slower than using strings in this case. + textDelete := "" + textInsert := "" + + for pointer < len(diffs) { + switch diffs[pointer].Type { + case DiffInsert: + countInsert++ + textInsert += diffs[pointer].Text + case DiffDelete: + countDelete++ + textDelete += diffs[pointer].Text + case DiffEqual: + // Upon reaching an equality, check for prior redundancies. + if countDelete >= 1 && countInsert >= 1 { + // Delete the offending records and add the merged ones. + diffs = splice(diffs, pointer-countDelete-countInsert, + countDelete+countInsert) + + pointer = pointer - countDelete - countInsert + a := dmp.diffMainRunes([]rune(textDelete), []rune(textInsert), false, deadline) + for j := len(a) - 1; j >= 0; j-- { + diffs = splice(diffs, pointer, 0, a[j]) + } + pointer = pointer + len(a) + } + + countInsert = 0 + countDelete = 0 + textDelete = "" + textInsert = "" + } + pointer++ + } + + return diffs[:len(diffs)-1] // Remove the dummy entry at the end. +} + +// DiffBisect finds the 'middle snake' of a diff, split the problem in two and return the recursively constructed diff. 
+// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +// See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations. +func (dmp *DiffMatchPatch) DiffBisect(text1, text2 string, deadline time.Time) []Diff { + // Unused in this code, but retained for interface compatibility. + return dmp.diffBisect([]rune(text1), []rune(text2), deadline) +} + +// diffBisect finds the 'middle snake' of a diff, splits the problem in two and returns the recursively constructed diff. +// See Myers's 1986 paper: An O(ND) Difference Algorithm and Its Variations. +func (dmp *DiffMatchPatch) diffBisect(runes1, runes2 []rune, deadline time.Time) []Diff { + // Cache the text lengths to prevent multiple calls. + runes1Len, runes2Len := len(runes1), len(runes2) + + maxD := (runes1Len + runes2Len + 1) / 2 + vOffset := maxD + vLength := 2 * maxD + + v1 := make([]int, vLength) + v2 := make([]int, vLength) + for i := range v1 { + v1[i] = -1 + v2[i] = -1 + } + v1[vOffset+1] = 0 + v2[vOffset+1] = 0 + + delta := runes1Len - runes2Len + // If the total number of characters is odd, then the front path will collide with the reverse path. + front := (delta%2 != 0) + // Offsets for start and end of k loop. Prevents mapping of space beyond the grid. + k1start := 0 + k1end := 0 + k2start := 0 + k2end := 0 + for d := 0; d < maxD; d++ { + // Bail out if deadline is reached. + if !deadline.IsZero() && time.Now().After(deadline) { + break + } + + // Walk the front path one step. + for k1 := -d + k1start; k1 <= d-k1end; k1 += 2 { + k1Offset := vOffset + k1 + var x1 int + + if k1 == -d || (k1 != d && v1[k1Offset-1] < v1[k1Offset+1]) { + x1 = v1[k1Offset+1] + } else { + x1 = v1[k1Offset-1] + 1 + } + + y1 := x1 - k1 + for x1 < runes1Len && y1 < runes2Len { + if runes1[x1] != runes2[y1] { + break + } + x1++ + y1++ + } + v1[k1Offset] = x1 + if x1 > runes1Len { + // Ran off the right of the graph. + k1end += 2 + } else if y1 > runes2Len { + // Ran off the bottom of the graph. + k1start += 2 + } else if front { + k2Offset := vOffset + delta - k1 + if k2Offset >= 0 && k2Offset < vLength && v2[k2Offset] != -1 { + // Mirror x2 onto top-left coordinate system. + x2 := runes1Len - v2[k2Offset] + if x1 >= x2 { + // Overlap detected. + return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline) + } + } + } + } + // Walk the reverse path one step. + for k2 := -d + k2start; k2 <= d-k2end; k2 += 2 { + k2Offset := vOffset + k2 + var x2 int + if k2 == -d || (k2 != d && v2[k2Offset-1] < v2[k2Offset+1]) { + x2 = v2[k2Offset+1] + } else { + x2 = v2[k2Offset-1] + 1 + } + var y2 = x2 - k2 + for x2 < runes1Len && y2 < runes2Len { + if runes1[runes1Len-x2-1] != runes2[runes2Len-y2-1] { + break + } + x2++ + y2++ + } + v2[k2Offset] = x2 + if x2 > runes1Len { + // Ran off the left of the graph. + k2end += 2 + } else if y2 > runes2Len { + // Ran off the top of the graph. + k2start += 2 + } else if !front { + k1Offset := vOffset + delta - k2 + if k1Offset >= 0 && k1Offset < vLength && v1[k1Offset] != -1 { + x1 := v1[k1Offset] + y1 := vOffset + x1 - k1Offset + // Mirror x2 onto top-left coordinate system. + x2 = runes1Len - x2 + if x1 >= x2 { + // Overlap detected. + return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline) + } + } + } + } + } + // Diff took too long and hit the deadline or number of diffs equals number of characters, no commonality at all. 
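The deadline checked at the top of the bisect loop above is derived from DiffTimeout; when it fires, the unresolved middle block degrades to one deletion plus one insertion. A sketch contrasting a tiny budget with an unlimited one (exact element counts will vary by machine and input):

```go
package main

import (
	"fmt"
	"strings"
	"time"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	a := strings.Repeat("abcde12345", 400)
	b := strings.Repeat("abcde54321", 400)

	dmp := diffmatchpatch.New()

	// A tiny deadline makes the bisect loop bail early, falling back to a
	// coarse delete-everything/insert-everything pair.
	dmp.DiffTimeout = time.Microsecond
	coarse := dmp.DiffMain(a, b, false)

	// Zero means no deadline: the Myers search runs to completion and
	// interleaves the repeated "abcde" equalities with small edits.
	dmp.DiffTimeout = 0
	minimal := dmp.DiffMain(a, b, false)

	fmt.Println(len(coarse), len(minimal)) // few vs. many diff elements
}
```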
+ return []Diff{ + Diff{DiffDelete, string(runes1)}, + Diff{DiffInsert, string(runes2)}, + } +} + +func (dmp *DiffMatchPatch) diffBisectSplit(runes1, runes2 []rune, x, y int, + deadline time.Time) []Diff { + runes1a := runes1[:x] + runes2a := runes2[:y] + runes1b := runes1[x:] + runes2b := runes2[y:] + + // Compute both diffs serially. + diffs := dmp.diffMainRunes(runes1a, runes2a, false, deadline) + diffsb := dmp.diffMainRunes(runes1b, runes2b, false, deadline) + + return append(diffs, diffsb...) +} + +// DiffLinesToChars splits two texts into a list of strings, and educes the texts to a string of hashes where each Unicode character represents one line. +// It's slightly faster to call DiffLinesToRunes first, followed by DiffMainRunes. +func (dmp *DiffMatchPatch) DiffLinesToChars(text1, text2 string) (string, string, []string) { + chars1, chars2, lineArray := dmp.DiffLinesToRunes(text1, text2) + return string(chars1), string(chars2), lineArray +} + +// DiffLinesToRunes splits two texts into a list of runes. Each rune represents one line. +func (dmp *DiffMatchPatch) DiffLinesToRunes(text1, text2 string) ([]rune, []rune, []string) { + // '\x00' is a valid character, but various debuggers don't like it. So we'll insert a junk entry to avoid generating a null character. + lineArray := []string{""} // e.g. lineArray[4] == 'Hello\n' + lineHash := map[string]int{} // e.g. lineHash['Hello\n'] == 4 + + chars1 := dmp.diffLinesToRunesMunge(text1, &lineArray, lineHash) + chars2 := dmp.diffLinesToRunesMunge(text2, &lineArray, lineHash) + + return chars1, chars2, lineArray +} + +func (dmp *DiffMatchPatch) diffLinesToRunes(text1, text2 []rune) ([]rune, []rune, []string) { + return dmp.DiffLinesToRunes(string(text1), string(text2)) +} + +// diffLinesToRunesMunge splits a text into an array of strings, and reduces the texts to a []rune where each Unicode character represents one line. +// We use strings instead of []runes as input mainly because you can't use []rune as a map key. +func (dmp *DiffMatchPatch) diffLinesToRunesMunge(text string, lineArray *[]string, lineHash map[string]int) []rune { + // Walk the text, pulling out a substring for each line. text.split('\n') would would temporarily double our memory footprint. Modifying text would create many large strings to garbage collect. + lineStart := 0 + lineEnd := -1 + runes := []rune{} + + for lineEnd < len(text)-1 { + lineEnd = indexOf(text, "\n", lineStart) + + if lineEnd == -1 { + lineEnd = len(text) - 1 + } + + line := text[lineStart : lineEnd+1] + lineStart = lineEnd + 1 + lineValue, ok := lineHash[line] + + if ok { + runes = append(runes, rune(lineValue)) + } else { + *lineArray = append(*lineArray, line) + lineHash[line] = len(*lineArray) - 1 + runes = append(runes, rune(len(*lineArray)-1)) + } + } + + return runes +} + +// DiffCharsToLines rehydrates the text in a diff from a string of line hashes to real lines of text. +func (dmp *DiffMatchPatch) DiffCharsToLines(diffs []Diff, lineArray []string) []Diff { + hydrated := make([]Diff, 0, len(diffs)) + for _, aDiff := range diffs { + chars := aDiff.Text + text := make([]string, len(chars)) + + for i, r := range chars { + text[i] = lineArray[r] + } + + aDiff.Text = strings.Join(text, "") + hydrated = append(hydrated, aDiff) + } + return hydrated +} + +// DiffCommonPrefix determines the common prefix length of two strings. +func (dmp *DiffMatchPatch) DiffCommonPrefix(text1, text2 string) int { + // Unused in this code, but retained for interface compatibility. 
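DiffLinesToChars/DiffCharsToLines are typically used as a sandwich around DiffMain for large texts: map each distinct line to a single rune, diff the compact strings, then rehydrate the runes back into full lines. A sketch:

```go
package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	a := "alpha\nbeta\ngamma\n"
	b := "alpha\nbeta\ndelta\n"

	dmp := diffmatchpatch.New()

	// Compress each distinct line to one rune, diff, then rehydrate.
	c1, c2, lines := dmp.DiffLinesToChars(a, b)
	diffs := dmp.DiffMain(c1, c2, false)
	diffs = dmp.DiffCharsToLines(diffs, lines)

	for _, d := range diffs {
		fmt.Printf("%v %q\n", d.Type, d.Text) // whole-line granularity
	}
}
```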
+ return commonPrefixLength([]rune(text1), []rune(text2)) +} + +// DiffCommonSuffix determines the common suffix length of two strings. +func (dmp *DiffMatchPatch) DiffCommonSuffix(text1, text2 string) int { + // Unused in this code, but retained for interface compatibility. + return commonSuffixLength([]rune(text1), []rune(text2)) +} + +// commonPrefixLength returns the length of the common prefix of two rune slices. +func commonPrefixLength(text1, text2 []rune) int { + short, long := text1, text2 + if len(short) > len(long) { + short, long = long, short + } + for i, r := range short { + if r != long[i] { + return i + } + } + return len(short) +} + +// commonSuffixLength returns the length of the common suffix of two rune slices. +func commonSuffixLength(text1, text2 []rune) int { + n := min(len(text1), len(text2)) + for i := 0; i < n; i++ { + if text1[len(text1)-i-1] != text2[len(text2)-i-1] { + return i + } + } + return n + + // TODO research and benchmark this, why is it not activated? https://github.com/sergi/go-diff/issues/54 + // Binary search. + // Performance analysis: http://neil.fraser.name/news/2007/10/09/ + /* + pointermin := 0 + pointermax := math.Min(len(text1), len(text2)) + pointermid := pointermax + pointerend := 0 + for pointermin < pointermid { + if text1[len(text1)-pointermid:len(text1)-pointerend] == + text2[len(text2)-pointermid:len(text2)-pointerend] { + pointermin = pointermid + pointerend = pointermin + } else { + pointermax = pointermid + } + pointermid = math.Floor((pointermax-pointermin)/2 + pointermin) + } + return pointermid + */ +} + +// DiffCommonOverlap determines if the suffix of one string is the prefix of another. +func (dmp *DiffMatchPatch) DiffCommonOverlap(text1 string, text2 string) int { + // Cache the text lengths to prevent multiple calls. + text1Length := len(text1) + text2Length := len(text2) + // Eliminate the null case. + if text1Length == 0 || text2Length == 0 { + return 0 + } + // Truncate the longer string. + if text1Length > text2Length { + text1 = text1[text1Length-text2Length:] + } else if text1Length < text2Length { + text2 = text2[0:text1Length] + } + textLength := int(math.Min(float64(text1Length), float64(text2Length))) + // Quick check for the worst case. + if text1 == text2 { + return textLength + } + + // Start by looking for a single character match and increase length until no match is found. Performance analysis: http://neil.fraser.name/news/2010/11/04/ + best := 0 + length := 1 + for { + pattern := text1[textLength-length:] + found := strings.Index(text2, pattern) + if found == -1 { + break + } + length += found + if found == 0 || text1[textLength-length:] == text2[0:length] { + best = length + length++ + } + } + + return best +} + +// DiffHalfMatch checks whether the two texts share a substring which is at least half the length of the longer text. This speedup can produce non-minimal diffs. +func (dmp *DiffMatchPatch) DiffHalfMatch(text1, text2 string) []string { + // Unused in this code, but retained for interface compatibility. + runeSlices := dmp.diffHalfMatch([]rune(text1), []rune(text2)) + if runeSlices == nil { + return nil + } + + result := make([]string, len(runeSlices)) + for i, r := range runeSlices { + result[i] = string(r) + } + return result +} + +func (dmp *DiffMatchPatch) diffHalfMatch(text1, text2 []rune) [][]rune { + if dmp.DiffTimeout <= 0 { + // Don't risk returning a non-optimal diff if we have unlimited time. 
+ return nil + } + + var longtext, shorttext []rune + if len(text1) > len(text2) { + longtext = text1 + shorttext = text2 + } else { + longtext = text2 + shorttext = text1 + } + + if len(longtext) < 4 || len(shorttext)*2 < len(longtext) { + return nil // Pointless. + } + + // First check if the second quarter is the seed for a half-match. + hm1 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+3)/4)) + + // Check again based on the third quarter. + hm2 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+1)/2)) + + hm := [][]rune{} + if hm1 == nil && hm2 == nil { + return nil + } else if hm2 == nil { + hm = hm1 + } else if hm1 == nil { + hm = hm2 + } else { + // Both matched. Select the longest. + if len(hm1[4]) > len(hm2[4]) { + hm = hm1 + } else { + hm = hm2 + } + } + + // A half-match was found, sort out the return data. + if len(text1) > len(text2) { + return hm + } + + return [][]rune{hm[2], hm[3], hm[0], hm[1], hm[4]} +} + +// diffHalfMatchI checks if a substring of shorttext exist within longtext such that the substring is at least half the length of longtext? +// Returns a slice containing the prefix of longtext, the suffix of longtext, the prefix of shorttext, the suffix of shorttext and the common middle, or null if there was no match. +func (dmp *DiffMatchPatch) diffHalfMatchI(l, s []rune, i int) [][]rune { + var bestCommonA []rune + var bestCommonB []rune + var bestCommonLen int + var bestLongtextA []rune + var bestLongtextB []rune + var bestShorttextA []rune + var bestShorttextB []rune + + // Start with a 1/4 length substring at position i as a seed. + seed := l[i : i+len(l)/4] + + for j := runesIndexOf(s, seed, 0); j != -1; j = runesIndexOf(s, seed, j+1) { + prefixLength := commonPrefixLength(l[i:], s[j:]) + suffixLength := commonSuffixLength(l[:i], s[:j]) + + if bestCommonLen < suffixLength+prefixLength { + bestCommonA = s[j-suffixLength : j] + bestCommonB = s[j : j+prefixLength] + bestCommonLen = len(bestCommonA) + len(bestCommonB) + bestLongtextA = l[:i-suffixLength] + bestLongtextB = l[i+prefixLength:] + bestShorttextA = s[:j-suffixLength] + bestShorttextB = s[j+prefixLength:] + } + } + + if bestCommonLen*2 < len(l) { + return nil + } + + return [][]rune{ + bestLongtextA, + bestLongtextB, + bestShorttextA, + bestShorttextB, + append(bestCommonA, bestCommonB...), + } +} + +// DiffCleanupSemantic reduces the number of edits by eliminating semantically trivial equalities. +func (dmp *DiffMatchPatch) DiffCleanupSemantic(diffs []Diff) []Diff { + changes := false + // Stack of indices where equalities are found. + type equality struct { + data int + next *equality + } + var equalities *equality + + var lastequality string + // Always equal to diffs[equalities[equalitiesLength - 1]][1] + var pointer int // Index of current position. + // Number of characters that changed prior to the equality. + var lengthInsertions1, lengthDeletions1 int + // Number of characters that changed after the equality. + var lengthInsertions2, lengthDeletions2 int + + for pointer < len(diffs) { + if diffs[pointer].Type == DiffEqual { + // Equality found. + + equalities = &equality{ + data: pointer, + next: equalities, + } + lengthInsertions1 = lengthInsertions2 + lengthDeletions1 = lengthDeletions2 + lengthInsertions2 = 0 + lengthDeletions2 = 0 + lastequality = diffs[pointer].Text + } else { + // An insertion or deletion. 
+ + if diffs[pointer].Type == DiffInsert { + lengthInsertions2 += len(diffs[pointer].Text) + } else { + lengthDeletions2 += len(diffs[pointer].Text) + } + // Eliminate an equality that is smaller or equal to the edits on both sides of it. + difference1 := int(math.Max(float64(lengthInsertions1), float64(lengthDeletions1))) + difference2 := int(math.Max(float64(lengthInsertions2), float64(lengthDeletions2))) + if len(lastequality) > 0 && + (len(lastequality) <= difference1) && + (len(lastequality) <= difference2) { + // Duplicate record. + insPoint := equalities.data + diffs = append( + diffs[:insPoint], + append([]Diff{Diff{DiffDelete, lastequality}}, diffs[insPoint:]...)...) + + // Change second copy to insert. + diffs[insPoint+1].Type = DiffInsert + // Throw away the equality we just deleted. + equalities = equalities.next + + if equalities != nil { + equalities = equalities.next + } + if equalities != nil { + pointer = equalities.data + } else { + pointer = -1 + } + + lengthInsertions1 = 0 // Reset the counters. + lengthDeletions1 = 0 + lengthInsertions2 = 0 + lengthDeletions2 = 0 + lastequality = "" + changes = true + } + } + pointer++ + } + + // Normalize the diff. + if changes { + diffs = dmp.DiffCleanupMerge(diffs) + } + diffs = dmp.DiffCleanupSemanticLossless(diffs) + // Find any overlaps between deletions and insertions. + // e.g: abcxxxxxxdef + // -> abcxxxdef + // e.g: xxxabcdefxxx + // -> defxxxabc + // Only extract an overlap if it is as big as the edit ahead or behind it. + pointer = 1 + for pointer < len(diffs) { + if diffs[pointer-1].Type == DiffDelete && + diffs[pointer].Type == DiffInsert { + deletion := diffs[pointer-1].Text + insertion := diffs[pointer].Text + overlapLength1 := dmp.DiffCommonOverlap(deletion, insertion) + overlapLength2 := dmp.DiffCommonOverlap(insertion, deletion) + if overlapLength1 >= overlapLength2 { + if float64(overlapLength1) >= float64(len(deletion))/2 || + float64(overlapLength1) >= float64(len(insertion))/2 { + + // Overlap found. Insert an equality and trim the surrounding edits. + diffs = append( + diffs[:pointer], + append([]Diff{Diff{DiffEqual, insertion[:overlapLength1]}}, diffs[pointer:]...)...) + + diffs[pointer-1].Text = + deletion[0 : len(deletion)-overlapLength1] + diffs[pointer+1].Text = insertion[overlapLength1:] + pointer++ + } + } else { + if float64(overlapLength2) >= float64(len(deletion))/2 || + float64(overlapLength2) >= float64(len(insertion))/2 { + // Reverse overlap found. Insert an equality and swap and trim the surrounding edits. + overlap := Diff{DiffEqual, deletion[:overlapLength2]} + diffs = append( + diffs[:pointer], + append([]Diff{overlap}, diffs[pointer:]...)...) + + diffs[pointer-1].Type = DiffInsert + diffs[pointer-1].Text = insertion[0 : len(insertion)-overlapLength2] + diffs[pointer+1].Type = DiffDelete + diffs[pointer+1].Text = deletion[overlapLength2:] + pointer++ + } + } + pointer++ + } + pointer++ + } + + return diffs +} + +// Define some regex patterns for matching boundaries. +var ( + nonAlphaNumericRegex = regexp.MustCompile(`[^a-zA-Z0-9]`) + whitespaceRegex = regexp.MustCompile(`\s`) + linebreakRegex = regexp.MustCompile(`[\r\n]`) + blanklineEndRegex = regexp.MustCompile(`\n\r?\n$`) + blanklineStartRegex = regexp.MustCompile(`^\r?\n\r?\n`) +) + +// diffCleanupSemanticScore computes a score representing whether the internal boundary falls on logical boundaries. +// Scores range from 6 (best) to 0 (worst). Closure, but does not reference any external variables. 
+func diffCleanupSemanticScore(one, two string) int { + if len(one) == 0 || len(two) == 0 { + // Edges are the best. + return 6 + } + + // Each port of this function behaves slightly differently due to subtle differences in each language's definition of things like 'whitespace'. Since this function's purpose is largely cosmetic, the choice has been made to use each language's native features rather than force total conformity. + rune1, _ := utf8.DecodeLastRuneInString(one) + rune2, _ := utf8.DecodeRuneInString(two) + char1 := string(rune1) + char2 := string(rune2) + + nonAlphaNumeric1 := nonAlphaNumericRegex.MatchString(char1) + nonAlphaNumeric2 := nonAlphaNumericRegex.MatchString(char2) + whitespace1 := nonAlphaNumeric1 && whitespaceRegex.MatchString(char1) + whitespace2 := nonAlphaNumeric2 && whitespaceRegex.MatchString(char2) + lineBreak1 := whitespace1 && linebreakRegex.MatchString(char1) + lineBreak2 := whitespace2 && linebreakRegex.MatchString(char2) + blankLine1 := lineBreak1 && blanklineEndRegex.MatchString(one) + blankLine2 := lineBreak2 && blanklineEndRegex.MatchString(two) + + if blankLine1 || blankLine2 { + // Five points for blank lines. + return 5 + } else if lineBreak1 || lineBreak2 { + // Four points for line breaks. + return 4 + } else if nonAlphaNumeric1 && !whitespace1 && whitespace2 { + // Three points for end of sentences. + return 3 + } else if whitespace1 || whitespace2 { + // Two points for whitespace. + return 2 + } else if nonAlphaNumeric1 || nonAlphaNumeric2 { + // One point for non-alphanumeric. + return 1 + } + return 0 +} + +// DiffCleanupSemanticLossless looks for single edits surrounded on both sides by equalities which can be shifted sideways to align the edit to a word boundary. +// E.g: The cat came. -> The cat came. +func (dmp *DiffMatchPatch) DiffCleanupSemanticLossless(diffs []Diff) []Diff { + pointer := 1 + + // Intentionally ignore the first and last element (don't need checking). + for pointer < len(diffs)-1 { + if diffs[pointer-1].Type == DiffEqual && + diffs[pointer+1].Type == DiffEqual { + + // This is a single edit surrounded by equalities. + equality1 := diffs[pointer-1].Text + edit := diffs[pointer].Text + equality2 := diffs[pointer+1].Text + + // First, shift the edit as far left as possible. + commonOffset := dmp.DiffCommonSuffix(equality1, edit) + if commonOffset > 0 { + commonString := edit[len(edit)-commonOffset:] + equality1 = equality1[0 : len(equality1)-commonOffset] + edit = commonString + edit[:len(edit)-commonOffset] + equality2 = commonString + equality2 + } + + // Second, step character by character right, looking for the best fit. + bestEquality1 := equality1 + bestEdit := edit + bestEquality2 := equality2 + bestScore := diffCleanupSemanticScore(equality1, edit) + + diffCleanupSemanticScore(edit, equality2) + + for len(edit) != 0 && len(equality2) != 0 { + _, sz := utf8.DecodeRuneInString(edit) + if len(equality2) < sz || edit[:sz] != equality2[:sz] { + break + } + equality1 += edit[:sz] + edit = edit[sz:] + equality2[:sz] + equality2 = equality2[sz:] + score := diffCleanupSemanticScore(equality1, edit) + + diffCleanupSemanticScore(edit, equality2) + // The >= encourages trailing rather than leading whitespace on edits. + if score >= bestScore { + bestScore = score + bestEquality1 = equality1 + bestEdit = edit + bestEquality2 = equality2 + } + } + + if diffs[pointer-1].Text != bestEquality1 { + // We have an improvement, save it back to the diff. 
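The boundary scoring above drives the semantic cleanup passes: a character-minimal diff often keeps tiny coincidental equalities that DiffCleanupSemantic then folds into larger, more readable edits. A sketch (the final diff length shown is the typical outcome, not a guarantee):

```go
package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()

	// The character-minimal diff of these words keeps single-letter
	// coincidental equalities ("o", "s") that make it hard to read.
	diffs := dmp.DiffMain("mouse", "sofas", false)
	fmt.Println(len(diffs)) // several interleaved elements

	// Semantic cleanup trades minimality for readability.
	diffs = dmp.DiffCleanupSemantic(diffs)
	fmt.Println(len(diffs)) // typically 2: delete "mouse", insert "sofas"
}
```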
+ if len(bestEquality1) != 0 { + diffs[pointer-1].Text = bestEquality1 + } else { + diffs = splice(diffs, pointer-1, 1) + pointer-- + } + + diffs[pointer].Text = bestEdit + if len(bestEquality2) != 0 { + diffs[pointer+1].Text = bestEquality2 + } else { + diffs = append(diffs[:pointer+1], diffs[pointer+2:]...) + pointer-- + } + } + } + pointer++ + } + + return diffs +} + +// DiffCleanupEfficiency reduces the number of edits by eliminating operationally trivial equalities. +func (dmp *DiffMatchPatch) DiffCleanupEfficiency(diffs []Diff) []Diff { + changes := false + // Stack of indices where equalities are found. + type equality struct { + data int + next *equality + } + var equalities *equality + // Always equal to equalities[equalitiesLength-1][1] + lastequality := "" + pointer := 0 // Index of current position. + // Is there an insertion operation before the last equality. + preIns := false + // Is there a deletion operation before the last equality. + preDel := false + // Is there an insertion operation after the last equality. + postIns := false + // Is there a deletion operation after the last equality. + postDel := false + for pointer < len(diffs) { + if diffs[pointer].Type == DiffEqual { // Equality found. + if len(diffs[pointer].Text) < dmp.DiffEditCost && + (postIns || postDel) { + // Candidate found. + equalities = &equality{ + data: pointer, + next: equalities, + } + preIns = postIns + preDel = postDel + lastequality = diffs[pointer].Text + } else { + // Not a candidate, and can never become one. + equalities = nil + lastequality = "" + } + postIns = false + postDel = false + } else { // An insertion or deletion. + if diffs[pointer].Type == DiffDelete { + postDel = true + } else { + postIns = true + } + + // Five types to be split: + // ABXYCD + // AXCD + // ABXC + // AXCD + // ABXC + var sumPres int + if preIns { + sumPres++ + } + if preDel { + sumPres++ + } + if postIns { + sumPres++ + } + if postDel { + sumPres++ + } + if len(lastequality) > 0 && + ((preIns && preDel && postIns && postDel) || + ((len(lastequality) < dmp.DiffEditCost/2) && sumPres == 3)) { + + insPoint := equalities.data + + // Duplicate record. + diffs = append(diffs[:insPoint], + append([]Diff{Diff{DiffDelete, lastequality}}, diffs[insPoint:]...)...) + + // Change second copy to insert. + diffs[insPoint+1].Type = DiffInsert + // Throw away the equality we just deleted. + equalities = equalities.next + lastequality = "" + + if preIns && preDel { + // No changes made which could affect previous entry, keep going. + postIns = true + postDel = true + equalities = nil + } else { + if equalities != nil { + equalities = equalities.next + } + if equalities != nil { + pointer = equalities.data + } else { + pointer = -1 + } + postIns = false + postDel = false + } + changes = true + } + } + pointer++ + } + + if changes { + diffs = dmp.DiffCleanupMerge(diffs) + } + + return diffs +} + +// DiffCleanupMerge reorders and merges like edit sections. Merge equalities. +// Any edit section can move as long as it doesn't cross an equality. +func (dmp *DiffMatchPatch) DiffCleanupMerge(diffs []Diff) []Diff { + // Add a dummy entry at the end. + diffs = append(diffs, Diff{DiffEqual, ""}) + pointer := 0 + countDelete := 0 + countInsert := 0 + commonlength := 0 + textDelete := []rune(nil) + textInsert := []rune(nil) + + for pointer < len(diffs) { + switch diffs[pointer].Type { + case DiffInsert: + countInsert++ + textInsert = append(textInsert, []rune(diffs[pointer].Text)...) 
+ pointer++ + break + case DiffDelete: + countDelete++ + textDelete = append(textDelete, []rune(diffs[pointer].Text)...) + pointer++ + break + case DiffEqual: + // Upon reaching an equality, check for prior redundancies. + if countDelete+countInsert > 1 { + if countDelete != 0 && countInsert != 0 { + // Factor out any common prefixies. + commonlength = commonPrefixLength(textInsert, textDelete) + if commonlength != 0 { + x := pointer - countDelete - countInsert + if x > 0 && diffs[x-1].Type == DiffEqual { + diffs[x-1].Text += string(textInsert[:commonlength]) + } else { + diffs = append([]Diff{Diff{DiffEqual, string(textInsert[:commonlength])}}, diffs...) + pointer++ + } + textInsert = textInsert[commonlength:] + textDelete = textDelete[commonlength:] + } + // Factor out any common suffixies. + commonlength = commonSuffixLength(textInsert, textDelete) + if commonlength != 0 { + insertIndex := len(textInsert) - commonlength + deleteIndex := len(textDelete) - commonlength + diffs[pointer].Text = string(textInsert[insertIndex:]) + diffs[pointer].Text + textInsert = textInsert[:insertIndex] + textDelete = textDelete[:deleteIndex] + } + } + // Delete the offending records and add the merged ones. + if countDelete == 0 { + diffs = splice(diffs, pointer-countInsert, + countDelete+countInsert, + Diff{DiffInsert, string(textInsert)}) + } else if countInsert == 0 { + diffs = splice(diffs, pointer-countDelete, + countDelete+countInsert, + Diff{DiffDelete, string(textDelete)}) + } else { + diffs = splice(diffs, pointer-countDelete-countInsert, + countDelete+countInsert, + Diff{DiffDelete, string(textDelete)}, + Diff{DiffInsert, string(textInsert)}) + } + + pointer = pointer - countDelete - countInsert + 1 + if countDelete != 0 { + pointer++ + } + if countInsert != 0 { + pointer++ + } + } else if pointer != 0 && diffs[pointer-1].Type == DiffEqual { + // Merge this equality with the previous one. + diffs[pointer-1].Text += diffs[pointer].Text + diffs = append(diffs[:pointer], diffs[pointer+1:]...) + } else { + pointer++ + } + countInsert = 0 + countDelete = 0 + textDelete = nil + textInsert = nil + break + } + } + + if len(diffs[len(diffs)-1].Text) == 0 { + diffs = diffs[0 : len(diffs)-1] // Remove the dummy entry at the end. + } + + // Second pass: look for single edits surrounded on both sides by equalities which can be shifted sideways to eliminate an equality. E.g: ABAC -> ABAC + changes := false + pointer = 1 + // Intentionally ignore the first and last element (don't need checking). + for pointer < (len(diffs) - 1) { + if diffs[pointer-1].Type == DiffEqual && + diffs[pointer+1].Type == DiffEqual { + // This is a single edit surrounded by equalities. + if strings.HasSuffix(diffs[pointer].Text, diffs[pointer-1].Text) { + // Shift the edit over the previous equality. + diffs[pointer].Text = diffs[pointer-1].Text + + diffs[pointer].Text[:len(diffs[pointer].Text)-len(diffs[pointer-1].Text)] + diffs[pointer+1].Text = diffs[pointer-1].Text + diffs[pointer+1].Text + diffs = splice(diffs, pointer-1, 1) + changes = true + } else if strings.HasPrefix(diffs[pointer].Text, diffs[pointer+1].Text) { + // Shift the edit over the next equality. + diffs[pointer-1].Text += diffs[pointer+1].Text + diffs[pointer].Text = + diffs[pointer].Text[len(diffs[pointer+1].Text):] + diffs[pointer+1].Text + diffs = splice(diffs, pointer+1, 1) + changes = true + } + } + pointer++ + } + + // If shifts were made, the diff needs reordering and another shift sweep. 
+	if changes {
+		diffs = dmp.DiffCleanupMerge(diffs)
+	}
+
+	return diffs
+}
+
+// DiffXIndex returns the equivalent location in s2.
+func (dmp *DiffMatchPatch) DiffXIndex(diffs []Diff, loc int) int {
+	chars1 := 0
+	chars2 := 0
+	lastChars1 := 0
+	lastChars2 := 0
+	lastDiff := Diff{}
+	for i := 0; i < len(diffs); i++ {
+		aDiff := diffs[i]
+		if aDiff.Type != DiffInsert {
+			// Equality or deletion.
+			chars1 += len(aDiff.Text)
+		}
+		if aDiff.Type != DiffDelete {
+			// Equality or insertion.
+			chars2 += len(aDiff.Text)
+		}
+		if chars1 > loc {
+			// Overshot the location.
+			lastDiff = aDiff
+			break
+		}
+		lastChars1 = chars1
+		lastChars2 = chars2
+	}
+	if lastDiff.Type == DiffDelete {
+		// The location was deleted.
+		return lastChars2
+	}
+	// Add the remaining character length.
+	return lastChars2 + (loc - lastChars1)
+}
+
+// DiffPrettyHtml converts a []Diff into a pretty HTML report.
+// It is intended as an example from which to write one's own display functions.
+func (dmp *DiffMatchPatch) DiffPrettyHtml(diffs []Diff) string {
+	var buff bytes.Buffer
+	for _, diff := range diffs {
+		text := strings.Replace(html.EscapeString(diff.Text), "\n", "&para;<br>", -1)
+		switch diff.Type {
+		case DiffInsert:
+			_, _ = buff.WriteString("<ins style=\"background:#e6ffe6;\">")
+			_, _ = buff.WriteString(text)
+			_, _ = buff.WriteString("</ins>")
+		case DiffDelete:
+			_, _ = buff.WriteString("<del style=\"background:#ffe6e6;\">")
+			_, _ = buff.WriteString(text)
+			_, _ = buff.WriteString("</del>")
+		case DiffEqual:
+			_, _ = buff.WriteString("<span>")
+			_, _ = buff.WriteString(text)
+			_, _ = buff.WriteString("</span>")
+		}
+	}
+	return buff.String()
+}
+
+// DiffPrettyText converts a []Diff into a colored text report.
+func (dmp *DiffMatchPatch) DiffPrettyText(diffs []Diff) string {
+	var buff bytes.Buffer
+	for _, diff := range diffs {
+		text := diff.Text
+
+		switch diff.Type {
+		case DiffInsert:
+			_, _ = buff.WriteString("\x1b[32m")
+			_, _ = buff.WriteString(text)
+			_, _ = buff.WriteString("\x1b[0m")
+		case DiffDelete:
+			_, _ = buff.WriteString("\x1b[31m")
+			_, _ = buff.WriteString(text)
+			_, _ = buff.WriteString("\x1b[0m")
+		case DiffEqual:
+			_, _ = buff.WriteString(text)
+		}
+	}
+
+	return buff.String()
+}
+
+// DiffText1 computes and returns the source text (all equalities and deletions).
+func (dmp *DiffMatchPatch) DiffText1(diffs []Diff) string {
+	var text bytes.Buffer
+
+	for _, aDiff := range diffs {
+		if aDiff.Type != DiffInsert {
+			_, _ = text.WriteString(aDiff.Text)
+		}
+	}
+	return text.String()
+}
+
+// DiffText2 computes and returns the destination text (all equalities and insertions).
+func (dmp *DiffMatchPatch) DiffText2(diffs []Diff) string {
+	var text bytes.Buffer
+
+	for _, aDiff := range diffs {
+		if aDiff.Type != DiffDelete {
+			_, _ = text.WriteString(aDiff.Text)
+		}
+	}
+	return text.String()
+}
+
+// DiffLevenshtein computes the Levenshtein distance that is the number of inserted, deleted or substituted characters.
+func (dmp *DiffMatchPatch) DiffLevenshtein(diffs []Diff) int {
+	levenshtein := 0
+	insertions := 0
+	deletions := 0
+
+	for _, aDiff := range diffs {
+		switch aDiff.Type {
+		case DiffInsert:
+			insertions += len(aDiff.Text)
+		case DiffDelete:
+			deletions += len(aDiff.Text)
+		case DiffEqual:
+			// A deletion and an insertion is one substitution.
+			levenshtein += max(insertions, deletions)
+			insertions = 0
+			deletions = 0
+		}
+	}
+
+	levenshtein += max(insertions, deletions)
+	return levenshtein
+}
+
+// DiffToDelta crushes the diff into an encoded string which describes the operations required to transform text1 into text2.
+// E.g. =3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'. Operations are tab-separated. Inserted text is escaped using %xx notation.
+func (dmp *DiffMatchPatch) DiffToDelta(diffs []Diff) string {
+	var text bytes.Buffer
+	for _, aDiff := range diffs {
+		switch aDiff.Type {
+		case DiffInsert:
+			_, _ = text.WriteString("+")
+			_, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1))
+			_, _ = text.WriteString("\t")
+			break
+		case DiffDelete:
+			_, _ = text.WriteString("-")
+			_, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text)))
+			_, _ = text.WriteString("\t")
+			break
+		case DiffEqual:
+			_, _ = text.WriteString("=")
+			_, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text)))
+			_, _ = text.WriteString("\t")
+			break
+		}
+	}
+	delta := text.String()
+	if len(delta) != 0 {
+		// Strip off trailing tab character.
+		delta = delta[0 : utf8.RuneCountInString(delta)-1]
+		delta = unescaper.Replace(delta)
+	}
+	return delta
+}
+
+// DiffFromDelta, given the original text1 and an encoded string which describes the operations required to transform text1 into text2, computes the full diff.
+func (dmp *DiffMatchPatch) DiffFromDelta(text1 string, delta string) (diffs []Diff, err error) {
+	i := 0
+	runes := []rune(text1)
+
+	for _, token := range strings.Split(delta, "\t") {
+		if len(token) == 0 {
+			// Blank tokens are ok (from a trailing \t).
+			continue
+		}
+
+		// Each token begins with a one character parameter which specifies the operation of this token (delete, insert, equality).
+		param := token[1:]
+
+		switch op := token[0]; op {
+		case '+':
+			// Decode would change all "+" to " "
+			param = strings.Replace(param, "+", "%2b", -1)
+			param, err = url.QueryUnescape(param)
+			if err != nil {
+				return nil, err
+			}
+			if !utf8.ValidString(param) {
+				return nil, fmt.Errorf("invalid UTF-8 token: %q", param)
+			}
+
+			diffs = append(diffs, Diff{DiffInsert, param})
+		case '=', '-':
+			n, err := strconv.ParseInt(param, 10, 0)
+			if err != nil {
+				return nil, err
+			} else if n < 0 {
+				return nil, errors.New("Negative number in DiffFromDelta: " + param)
+			}
+
+			i += int(n)
+			// Break out if we are out of bounds, go1.6 can't handle this very well
+			if i > len(runes) {
+				break
+			}
+			// Remember that string slicing is by byte - we want by rune here.
+			text := string(runes[i-int(n) : i])
+
+			if op == '=' {
+				diffs = append(diffs, Diff{DiffEqual, text})
+			} else {
+				diffs = append(diffs, Diff{DiffDelete, text})
+			}
+		default:
+			// Anything else is an error.
+			return nil, errors.New("Invalid diff operation in DiffFromDelta: " + string(token[0]))
+		}
+	}
+
+	if i != len(runes) {
+		return nil, fmt.Errorf("Delta length (%v) is different from source text length (%v)", i, len(text1))
+	}
+
+	return diffs, nil
+}
diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go
new file mode 100644
index 0000000..d3acc32
--- /dev/null
+++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go
@@ -0,0 +1,46 @@
+// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
+// https://github.com/sergi/go-diff
+// See the included LICENSE file for license details.
+//
+// go-diff is a Go implementation of Google's Diff, Match, and Patch library
+// Original library is Copyright (c) 2006 Google Inc.
+// http://code.google.com/p/google-diff-match-patch/
+
+// Package diffmatchpatch offers robust algorithms to perform the operations required for synchronizing plain text.
+package diffmatchpatch
+
+import (
+	"time"
+)
+
+// DiffMatchPatch holds the configuration for diff-match-patch operations.
+type DiffMatchPatch struct {
+	// Number of seconds to map a diff before giving up (0 for infinity).
+	DiffTimeout time.Duration
+	// Cost of an empty edit operation in terms of edit characters.
+	DiffEditCost int
+	// How far to search for a match (0 = exact location, 1000+ = broad match). A match this many characters away from the expected location will add 1.0 to the score (0.0 is a perfect match).
+	MatchDistance int
+	// When deleting a large block of text (over ~64 characters), how close do the contents have to be to match the expected contents. (0.0 = perfection, 1.0 = very loose). Note that MatchThreshold controls how closely the end points of a delete need to match.
+	PatchDeleteThreshold float64
+	// Chunk size for context length.
+	PatchMargin int
+	// The number of bits in an int.
+	MatchMaxBits int
+	// At what point is no match declared (0.0 = perfection, 1.0 = very loose).
+	MatchThreshold float64
+}
+
+// New creates a new DiffMatchPatch object with default parameters.
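DiffToDelta and DiffFromDelta are inverses given the original source text, which makes the delta form handy for shipping diffs over the wire. A round-trip sketch:

```go
package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()
	a, b := "the quick brown fox", "the quick red fox"

	// Crush the diff into a compact, tab-separated delta string.
	delta := dmp.DiffToDelta(dmp.DiffMain(a, b, false))
	fmt.Println(delta)

	// Reconstruct the full diff from the source text plus the delta.
	diffs, err := dmp.DiffFromDelta(a, delta)
	if err != nil {
		panic(err)
	}
	fmt.Println(dmp.DiffText2(diffs) == b) // true
}
```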
+func New() *DiffMatchPatch { + // Defaults. + return &DiffMatchPatch{ + DiffTimeout: time.Second, + DiffEditCost: 4, + MatchThreshold: 0.5, + MatchDistance: 1000, + PatchDeleteThreshold: 0.5, + PatchMargin: 4, + MatchMaxBits: 32, + } +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go new file mode 100644 index 0000000..17374e1 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go @@ -0,0 +1,160 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "math" +) + +// MatchMain locates the best instance of 'pattern' in 'text' near 'loc'. +// Returns -1 if no match found. +func (dmp *DiffMatchPatch) MatchMain(text, pattern string, loc int) int { + // Check for null inputs not needed since null can't be passed in C#. + + loc = int(math.Max(0, math.Min(float64(loc), float64(len(text))))) + if text == pattern { + // Shortcut (potentially not guaranteed by the algorithm) + return 0 + } else if len(text) == 0 { + // Nothing to match. + return -1 + } else if loc+len(pattern) <= len(text) && text[loc:loc+len(pattern)] == pattern { + // Perfect match at the perfect spot! (Includes case of null pattern) + return loc + } + // Do a fuzzy compare. + return dmp.MatchBitap(text, pattern, loc) +} + +// MatchBitap locates the best instance of 'pattern' in 'text' near 'loc' using the Bitap algorithm. +// Returns -1 if no match was found. +func (dmp *DiffMatchPatch) MatchBitap(text, pattern string, loc int) int { + // Initialise the alphabet. + s := dmp.MatchAlphabet(pattern) + + // Highest score beyond which we give up. + scoreThreshold := dmp.MatchThreshold + // Is there a nearby exact match? (speedup) + bestLoc := indexOf(text, pattern, loc) + if bestLoc != -1 { + scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc, + pattern), scoreThreshold) + // What about in the other direction? (speedup) + bestLoc = lastIndexOf(text, pattern, loc+len(pattern)) + if bestLoc != -1 { + scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc, + pattern), scoreThreshold) + } + } + + // Initialise the bit arrays. + matchmask := 1 << uint((len(pattern) - 1)) + bestLoc = -1 + + var binMin, binMid int + binMax := len(pattern) + len(text) + lastRd := []int{} + for d := 0; d < len(pattern); d++ { + // Scan for the best match; each iteration allows for one more error. Run a binary search to determine how far from 'loc' we can stray at this error level. + binMin = 0 + binMid = binMax + for binMin < binMid { + if dmp.matchBitapScore(d, loc+binMid, loc, pattern) <= scoreThreshold { + binMin = binMid + } else { + binMax = binMid + } + binMid = (binMax-binMin)/2 + binMin + } + // Use the result from this iteration as the maximum for the next. + binMax = binMid + start := int(math.Max(1, float64(loc-binMid+1))) + finish := int(math.Min(float64(loc+binMid), float64(len(text))) + float64(len(pattern))) + + rd := make([]int, finish+2) + rd[finish+1] = (1 << uint(d)) - 1 + + for j := finish; j >= start; j-- { + var charMatch int + if len(text) <= j-1 { + // Out of range. 
+				charMatch = 0
+			} else if _, ok := s[text[j-1]]; !ok {
+				charMatch = 0
+			} else {
+				charMatch = s[text[j-1]]
+			}
+
+			if d == 0 {
+				// First pass: exact match.
+				rd[j] = ((rd[j+1] << 1) | 1) & charMatch
+			} else {
+				// Subsequent passes: fuzzy match.
+				rd[j] = ((rd[j+1]<<1)|1)&charMatch | (((lastRd[j+1] | lastRd[j]) << 1) | 1) | lastRd[j+1]
+			}
+			if (rd[j] & matchmask) != 0 {
+				score := dmp.matchBitapScore(d, j-1, loc, pattern)
+				// This match will almost certainly be better than any existing match. But check anyway.
+				if score <= scoreThreshold {
+					// Told you so.
+					scoreThreshold = score
+					bestLoc = j - 1
+					if bestLoc > loc {
+						// When passing loc, don't exceed our current distance from loc.
+						start = int(math.Max(1, float64(2*loc-bestLoc)))
+					} else {
+						// Already passed loc, downhill from here on in.
+						break
+					}
+				}
+			}
+		}
+		if dmp.matchBitapScore(d+1, loc, loc, pattern) > scoreThreshold {
+			// No hope for a (better) match at greater error levels.
+			break
+		}
+		lastRd = rd
+	}
+	return bestLoc
+}
+
+// matchBitapScore computes and returns the score for a match with e errors and x location.
+func (dmp *DiffMatchPatch) matchBitapScore(e, x, loc int, pattern string) float64 {
+	accuracy := float64(e) / float64(len(pattern))
+	proximity := math.Abs(float64(loc - x))
+	if dmp.MatchDistance == 0 {
+		// Dodge divide by zero error.
+		if proximity == 0 {
+			return accuracy
+		}
+
+		return 1.0
+	}
+	return accuracy + (proximity / float64(dmp.MatchDistance))
+}
+
+// MatchAlphabet initialises the alphabet for the Bitap algorithm.
+func (dmp *DiffMatchPatch) MatchAlphabet(pattern string) map[byte]int {
+	s := map[byte]int{}
+	charPattern := []byte(pattern)
+	for _, c := range charPattern {
+		_, ok := s[c]
+		if !ok {
+			s[c] = 0
+		}
+	}
+	i := 0
+
+	for _, c := range charPattern {
+		value := s[c] | int(uint(1)<<uint((len(pattern)-i-1)))
+		s[c] = value
+		i++
+	}
+	return s
+}
diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
+// https://github.com/sergi/go-diff
+// See the included LICENSE file for license details.
+//
+// go-diff is a Go implementation of Google's Diff, Match, and Patch library
+// Original library is Copyright (c) 2006 Google Inc.
+// http://code.google.com/p/google-diff-match-patch/
+
+package diffmatchpatch
+
+func min(x, y int) int {
+	if x < y {
+		return x
+	}
+	return y
+}
+
+func max(x, y int) int {
+	if x > y {
+		return x
+	}
+	return y
+}
diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go
new file mode 100644
index 0000000..223c43c
--- /dev/null
+++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go
@@ -0,0 +1,556 @@
+// Copyright (c) 2012-2016 The go-diff authors. All rights reserved.
+// https://github.com/sergi/go-diff
+// See the included LICENSE file for license details.
+//
+// go-diff is a Go implementation of Google's Diff, Match, and Patch library
+// Original library is Copyright (c) 2006 Google Inc.
+// http://code.google.com/p/google-diff-match-patch/
+
+package diffmatchpatch
+
+import (
+	"bytes"
+	"errors"
+	"math"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// Patch represents one patch operation.
+type Patch struct {
+	diffs   []Diff
+	Start1  int
+	Start2  int
+	Length1 int
+	Length2 int
+}
+
+// String emulates GNU diff's format.
+// Header: @@ -382,8 +481,9 @@
+// Indices are printed as 1-based, not 0-based.
+func (p *Patch) String() string {
+	var coords1, coords2 string
+
+	if p.Length1 == 0 {
+		coords1 = strconv.Itoa(p.Start1) + ",0"
+	} else if p.Length1 == 1 {
+		coords1 = strconv.Itoa(p.Start1 + 1)
+	} else {
+		coords1 = strconv.Itoa(p.Start1+1) + "," + strconv.Itoa(p.Length1)
+	}
+
+	if p.Length2 == 0 {
+		coords2 = strconv.Itoa(p.Start2) + ",0"
+	} else if p.Length2 == 1 {
+		coords2 = strconv.Itoa(p.Start2 + 1)
+	} else {
+		coords2 = strconv.Itoa(p.Start2+1) + "," + strconv.Itoa(p.Length2)
+	}
+
+	var text bytes.Buffer
+	_, _ = text.WriteString("@@ -" + coords1 + " +" + coords2 + " @@\n")
+
+	// Escape the body of the patch with %xx notation.
+ for _, aDiff := range p.diffs { + switch aDiff.Type { + case DiffInsert: + _, _ = text.WriteString("+") + case DiffDelete: + _, _ = text.WriteString("-") + case DiffEqual: + _, _ = text.WriteString(" ") + } + + _, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1)) + _, _ = text.WriteString("\n") + } + + return unescaper.Replace(text.String()) +} + +// PatchAddContext increases the context until it is unique, but doesn't let the pattern expand beyond MatchMaxBits. +func (dmp *DiffMatchPatch) PatchAddContext(patch Patch, text string) Patch { + if len(text) == 0 { + return patch + } + + pattern := text[patch.Start2 : patch.Start2+patch.Length1] + padding := 0 + + // Look for the first and last matches of pattern in text. If two different matches are found, increase the pattern length. + for strings.Index(text, pattern) != strings.LastIndex(text, pattern) && + len(pattern) < dmp.MatchMaxBits-2*dmp.PatchMargin { + padding += dmp.PatchMargin + maxStart := max(0, patch.Start2-padding) + minEnd := min(len(text), patch.Start2+patch.Length1+padding) + pattern = text[maxStart:minEnd] + } + // Add one chunk for good luck. + padding += dmp.PatchMargin + + // Add the prefix. + prefix := text[max(0, patch.Start2-padding):patch.Start2] + if len(prefix) != 0 { + patch.diffs = append([]Diff{Diff{DiffEqual, prefix}}, patch.diffs...) + } + // Add the suffix. + suffix := text[patch.Start2+patch.Length1 : min(len(text), patch.Start2+patch.Length1+padding)] + if len(suffix) != 0 { + patch.diffs = append(patch.diffs, Diff{DiffEqual, suffix}) + } + + // Roll back the start points. + patch.Start1 -= len(prefix) + patch.Start2 -= len(prefix) + // Extend the lengths. + patch.Length1 += len(prefix) + len(suffix) + patch.Length2 += len(prefix) + len(suffix) + + return patch +} + +// PatchMake computes a list of patches. +func (dmp *DiffMatchPatch) PatchMake(opt ...interface{}) []Patch { + if len(opt) == 1 { + diffs, _ := opt[0].([]Diff) + text1 := dmp.DiffText1(diffs) + return dmp.PatchMake(text1, diffs) + } else if len(opt) == 2 { + text1 := opt[0].(string) + switch t := opt[1].(type) { + case string: + diffs := dmp.DiffMain(text1, t, true) + if len(diffs) > 2 { + diffs = dmp.DiffCleanupSemantic(diffs) + diffs = dmp.DiffCleanupEfficiency(diffs) + } + return dmp.PatchMake(text1, diffs) + case []Diff: + return dmp.patchMake2(text1, t) + } + } else if len(opt) == 3 { + return dmp.PatchMake(opt[0], opt[2]) + } + return []Patch{} +} + +// patchMake2 computes a list of patches to turn text1 into text2. +// text2 is not provided, diffs are the delta between text1 and text2. +func (dmp *DiffMatchPatch) patchMake2(text1 string, diffs []Diff) []Patch { + // Check for null inputs not needed since null can't be passed in C#. + patches := []Patch{} + if len(diffs) == 0 { + return patches // Get rid of the null case. + } + + patch := Patch{} + charCount1 := 0 // Number of characters into the text1 string. + charCount2 := 0 // Number of characters into the text2 string. + // Start with text1 (prepatchText) and apply the diffs until we arrive at text2 (postpatchText). We recreate the patches one by one to determine context info. + prepatchText := text1 + postpatchText := text1 + + for i, aDiff := range diffs { + if len(patch.diffs) == 0 && aDiff.Type != DiffEqual { + // A new patch starts here. 
+ patch.Start1 = charCount1 + patch.Start2 = charCount2 + } + + switch aDiff.Type { + case DiffInsert: + patch.diffs = append(patch.diffs, aDiff) + patch.Length2 += len(aDiff.Text) + postpatchText = postpatchText[:charCount2] + + aDiff.Text + postpatchText[charCount2:] + case DiffDelete: + patch.Length1 += len(aDiff.Text) + patch.diffs = append(patch.diffs, aDiff) + postpatchText = postpatchText[:charCount2] + postpatchText[charCount2+len(aDiff.Text):] + case DiffEqual: + if len(aDiff.Text) <= 2*dmp.PatchMargin && + len(patch.diffs) != 0 && i != len(diffs)-1 { + // Small equality inside a patch. + patch.diffs = append(patch.diffs, aDiff) + patch.Length1 += len(aDiff.Text) + patch.Length2 += len(aDiff.Text) + } + if len(aDiff.Text) >= 2*dmp.PatchMargin { + // Time for a new patch. + if len(patch.diffs) != 0 { + patch = dmp.PatchAddContext(patch, prepatchText) + patches = append(patches, patch) + patch = Patch{} + // Unlike Unidiff, our patch lists have a rolling context. http://code.google.com/p/google-diff-match-patch/wiki/Unidiff Update prepatch text & pos to reflect the application of the just completed patch. + prepatchText = postpatchText + charCount1 = charCount2 + } + } + } + + // Update the current character count. + if aDiff.Type != DiffInsert { + charCount1 += len(aDiff.Text) + } + if aDiff.Type != DiffDelete { + charCount2 += len(aDiff.Text) + } + } + + // Pick up the leftover patch if not empty. + if len(patch.diffs) != 0 { + patch = dmp.PatchAddContext(patch, prepatchText) + patches = append(patches, patch) + } + + return patches +} + +// PatchDeepCopy returns an array that is identical to a given an array of patches. +func (dmp *DiffMatchPatch) PatchDeepCopy(patches []Patch) []Patch { + patchesCopy := []Patch{} + for _, aPatch := range patches { + patchCopy := Patch{} + for _, aDiff := range aPatch.diffs { + patchCopy.diffs = append(patchCopy.diffs, Diff{ + aDiff.Type, + aDiff.Text, + }) + } + patchCopy.Start1 = aPatch.Start1 + patchCopy.Start2 = aPatch.Start2 + patchCopy.Length1 = aPatch.Length1 + patchCopy.Length2 = aPatch.Length2 + patchesCopy = append(patchesCopy, patchCopy) + } + return patchesCopy +} + +// PatchApply merges a set of patches onto the text. Returns a patched text, as well as an array of true/false values indicating which patches were applied. +func (dmp *DiffMatchPatch) PatchApply(patches []Patch, text string) (string, []bool) { + if len(patches) == 0 { + return text, []bool{} + } + + // Deep copy the patches so that no changes are made to originals. + patches = dmp.PatchDeepCopy(patches) + + nullPadding := dmp.PatchAddPadding(patches) + text = nullPadding + text + nullPadding + patches = dmp.PatchSplitMax(patches) + + x := 0 + // delta keeps track of the offset between the expected and actual location of the previous patch. If there are patches expected at positions 10 and 20, but the first patch was found at 12, delta is 2 and the second patch has an effective expected position of 22. + delta := 0 + results := make([]bool, len(patches)) + for _, aPatch := range patches { + expectedLoc := aPatch.Start2 + delta + text1 := dmp.DiffText1(aPatch.diffs) + var startLoc int + endLoc := -1 + if len(text1) > dmp.MatchMaxBits { + // PatchSplitMax will only provide an oversized pattern in the case of a monster delete. 
+ startLoc = dmp.MatchMain(text, text1[:dmp.MatchMaxBits], expectedLoc) + if startLoc != -1 { + endLoc = dmp.MatchMain(text, + text1[len(text1)-dmp.MatchMaxBits:], expectedLoc+len(text1)-dmp.MatchMaxBits) + if endLoc == -1 || startLoc >= endLoc { + // Can't find valid trailing context. Drop this patch. + startLoc = -1 + } + } + } else { + startLoc = dmp.MatchMain(text, text1, expectedLoc) + } + if startLoc == -1 { + // No match found. :( + results[x] = false + // Subtract the delta for this failed patch from subsequent patches. + delta -= aPatch.Length2 - aPatch.Length1 + } else { + // Found a match. :) + results[x] = true + delta = startLoc - expectedLoc + var text2 string + if endLoc == -1 { + text2 = text[startLoc:int(math.Min(float64(startLoc+len(text1)), float64(len(text))))] + } else { + text2 = text[startLoc:int(math.Min(float64(endLoc+dmp.MatchMaxBits), float64(len(text))))] + } + if text1 == text2 { + // Perfect match, just shove the Replacement text in. + text = text[:startLoc] + dmp.DiffText2(aPatch.diffs) + text[startLoc+len(text1):] + } else { + // Imperfect match. Run a diff to get a framework of equivalent indices. + diffs := dmp.DiffMain(text1, text2, false) + if len(text1) > dmp.MatchMaxBits && float64(dmp.DiffLevenshtein(diffs))/float64(len(text1)) > dmp.PatchDeleteThreshold { + // The end points match, but the content is unacceptably bad. + results[x] = false + } else { + diffs = dmp.DiffCleanupSemanticLossless(diffs) + index1 := 0 + for _, aDiff := range aPatch.diffs { + if aDiff.Type != DiffEqual { + index2 := dmp.DiffXIndex(diffs, index1) + if aDiff.Type == DiffInsert { + // Insertion + text = text[:startLoc+index2] + aDiff.Text + text[startLoc+index2:] + } else if aDiff.Type == DiffDelete { + // Deletion + startIndex := startLoc + index2 + text = text[:startIndex] + + text[startIndex+dmp.DiffXIndex(diffs, index1+len(aDiff.Text))-index2:] + } + } + if aDiff.Type != DiffDelete { + index1 += len(aDiff.Text) + } + } + } + } + } + x++ + } + // Strip the padding off. + text = text[len(nullPadding) : len(nullPadding)+(len(text)-2*len(nullPadding))] + return text, results +} + +// PatchAddPadding adds some padding on text start and end so that edges can match something. +// Intended to be called only from within patchApply. +func (dmp *DiffMatchPatch) PatchAddPadding(patches []Patch) string { + paddingLength := dmp.PatchMargin + nullPadding := "" + for x := 1; x <= paddingLength; x++ { + nullPadding += string(x) + } + + // Bump all the patches forward. + for i := range patches { + patches[i].Start1 += paddingLength + patches[i].Start2 += paddingLength + } + + // Add some padding on start of first diff. + if len(patches[0].diffs) == 0 || patches[0].diffs[0].Type != DiffEqual { + // Add nullPadding equality. + patches[0].diffs = append([]Diff{Diff{DiffEqual, nullPadding}}, patches[0].diffs...) + patches[0].Start1 -= paddingLength // Should be 0. + patches[0].Start2 -= paddingLength // Should be 0. + patches[0].Length1 += paddingLength + patches[0].Length2 += paddingLength + } else if paddingLength > len(patches[0].diffs[0].Text) { + // Grow first equality. + extraLength := paddingLength - len(patches[0].diffs[0].Text) + patches[0].diffs[0].Text = nullPadding[len(patches[0].diffs[0].Text):] + patches[0].diffs[0].Text + patches[0].Start1 -= extraLength + patches[0].Start2 -= extraLength + patches[0].Length1 += extraLength + patches[0].Length2 += extraLength + } + + // Add some padding on end of last diff. 
+ last := len(patches) - 1 + if len(patches[last].diffs) == 0 || patches[last].diffs[len(patches[last].diffs)-1].Type != DiffEqual { + // Add nullPadding equality. + patches[last].diffs = append(patches[last].diffs, Diff{DiffEqual, nullPadding}) + patches[last].Length1 += paddingLength + patches[last].Length2 += paddingLength + } else if paddingLength > len(patches[last].diffs[len(patches[last].diffs)-1].Text) { + // Grow last equality. + lastDiff := patches[last].diffs[len(patches[last].diffs)-1] + extraLength := paddingLength - len(lastDiff.Text) + patches[last].diffs[len(patches[last].diffs)-1].Text += nullPadding[:extraLength] + patches[last].Length1 += extraLength + patches[last].Length2 += extraLength + } + + return nullPadding +} + +// PatchSplitMax looks through the patches and breaks up any which are longer than the maximum limit of the match algorithm. +// Intended to be called only from within patchApply. +func (dmp *DiffMatchPatch) PatchSplitMax(patches []Patch) []Patch { + patchSize := dmp.MatchMaxBits + for x := 0; x < len(patches); x++ { + if patches[x].Length1 <= patchSize { + continue + } + bigpatch := patches[x] + // Remove the big old patch. + patches = append(patches[:x], patches[x+1:]...) + x-- + + Start1 := bigpatch.Start1 + Start2 := bigpatch.Start2 + precontext := "" + for len(bigpatch.diffs) != 0 { + // Create one of several smaller patches. + patch := Patch{} + empty := true + patch.Start1 = Start1 - len(precontext) + patch.Start2 = Start2 - len(precontext) + if len(precontext) != 0 { + patch.Length1 = len(precontext) + patch.Length2 = len(precontext) + patch.diffs = append(patch.diffs, Diff{DiffEqual, precontext}) + } + for len(bigpatch.diffs) != 0 && patch.Length1 < patchSize-dmp.PatchMargin { + diffType := bigpatch.diffs[0].Type + diffText := bigpatch.diffs[0].Text + if diffType == DiffInsert { + // Insertions are harmless. + patch.Length2 += len(diffText) + Start2 += len(diffText) + patch.diffs = append(patch.diffs, bigpatch.diffs[0]) + bigpatch.diffs = bigpatch.diffs[1:] + empty = false + } else if diffType == DiffDelete && len(patch.diffs) == 1 && patch.diffs[0].Type == DiffEqual && len(diffText) > 2*patchSize { + // This is a large deletion. Let it pass in one chunk. + patch.Length1 += len(diffText) + Start1 += len(diffText) + empty = false + patch.diffs = append(patch.diffs, Diff{diffType, diffText}) + bigpatch.diffs = bigpatch.diffs[1:] + } else { + // Deletion or equality. Only take as much as we can stomach. + diffText = diffText[:min(len(diffText), patchSize-patch.Length1-dmp.PatchMargin)] + + patch.Length1 += len(diffText) + Start1 += len(diffText) + if diffType == DiffEqual { + patch.Length2 += len(diffText) + Start2 += len(diffText) + } else { + empty = false + } + patch.diffs = append(patch.diffs, Diff{diffType, diffText}) + if diffText == bigpatch.diffs[0].Text { + bigpatch.diffs = bigpatch.diffs[1:] + } else { + bigpatch.diffs[0].Text = + bigpatch.diffs[0].Text[len(diffText):] + } + } + } + // Compute the head context for the next patch. + precontext = dmp.DiffText2(patch.diffs) + precontext = precontext[max(0, len(precontext)-dmp.PatchMargin):] + + postcontext := "" + // Append the end context for this patch. 
+ if len(dmp.DiffText1(bigpatch.diffs)) > dmp.PatchMargin { + postcontext = dmp.DiffText1(bigpatch.diffs)[:dmp.PatchMargin] + } else { + postcontext = dmp.DiffText1(bigpatch.diffs) + } + + if len(postcontext) != 0 { + patch.Length1 += len(postcontext) + patch.Length2 += len(postcontext) + if len(patch.diffs) != 0 && patch.diffs[len(patch.diffs)-1].Type == DiffEqual { + patch.diffs[len(patch.diffs)-1].Text += postcontext + } else { + patch.diffs = append(patch.diffs, Diff{DiffEqual, postcontext}) + } + } + if !empty { + x++ + patches = append(patches[:x], append([]Patch{patch}, patches[x:]...)...) + } + } + } + return patches +} + +// PatchToText takes a list of patches and returns a textual representation. +func (dmp *DiffMatchPatch) PatchToText(patches []Patch) string { + var text bytes.Buffer + for _, aPatch := range patches { + _, _ = text.WriteString(aPatch.String()) + } + return text.String() +} + +// PatchFromText parses a textual representation of patches and returns a List of Patch objects. +func (dmp *DiffMatchPatch) PatchFromText(textline string) ([]Patch, error) { + patches := []Patch{} + if len(textline) == 0 { + return patches, nil + } + text := strings.Split(textline, "\n") + textPointer := 0 + patchHeader := regexp.MustCompile("^@@ -(\\d+),?(\\d*) \\+(\\d+),?(\\d*) @@$") + + var patch Patch + var sign uint8 + var line string + for textPointer < len(text) { + + if !patchHeader.MatchString(text[textPointer]) { + return patches, errors.New("Invalid patch string: " + text[textPointer]) + } + + patch = Patch{} + m := patchHeader.FindStringSubmatch(text[textPointer]) + + patch.Start1, _ = strconv.Atoi(m[1]) + if len(m[2]) == 0 { + patch.Start1-- + patch.Length1 = 1 + } else if m[2] == "0" { + patch.Length1 = 0 + } else { + patch.Start1-- + patch.Length1, _ = strconv.Atoi(m[2]) + } + + patch.Start2, _ = strconv.Atoi(m[3]) + + if len(m[4]) == 0 { + patch.Start2-- + patch.Length2 = 1 + } else if m[4] == "0" { + patch.Length2 = 0 + } else { + patch.Start2-- + patch.Length2, _ = strconv.Atoi(m[4]) + } + textPointer++ + + for textPointer < len(text) { + if len(text[textPointer]) > 0 { + sign = text[textPointer][0] + } else { + textPointer++ + continue + } + + line = text[textPointer][1:] + line = strings.Replace(line, "+", "%2b", -1) + line, _ = url.QueryUnescape(line) + if sign == '-' { + // Deletion. + patch.diffs = append(patch.diffs, Diff{DiffDelete, line}) + } else if sign == '+' { + // Insertion. + patch.diffs = append(patch.diffs, Diff{DiffInsert, line}) + } else if sign == ' ' { + // Minor equality. + patch.diffs = append(patch.diffs, Diff{DiffEqual, line}) + } else if sign == '@' { + // Start of next patch. + break + } else { + // WTF? + return patches, errors.New("Invalid patch mode '" + string(sign) + "' in: " + string(line)) + } + textPointer++ + } + + patches = append(patches, patch) + } + return patches, nil +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go new file mode 100644 index 0000000..265f29c --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go @@ -0,0 +1,88 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. 
+// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "strings" + "unicode/utf8" +) + +// unescaper unescapes selected chars for compatibility with JavaScript's encodeURI. +// In speed critical applications this could be dropped since the receiving application will certainly decode these fine. Note that this function is case-sensitive. Thus "%3F" would not be unescaped. But this is ok because it is only called with the output of HttpUtility.UrlEncode which returns lowercase hex. Example: "%3f" -> "?", "%24" -> "$", etc. +var unescaper = strings.NewReplacer( + "%21", "!", "%7E", "~", "%27", "'", + "%28", "(", "%29", ")", "%3B", ";", + "%2F", "/", "%3F", "?", "%3A", ":", + "%40", "@", "%26", "&", "%3D", "=", + "%2B", "+", "%24", "$", "%2C", ",", "%23", "#", "%2A", "*") + +// indexOf returns the first index of pattern in str, starting at str[i]. +func indexOf(str string, pattern string, i int) int { + if i > len(str)-1 { + return -1 + } + if i <= 0 { + return strings.Index(str, pattern) + } + ind := strings.Index(str[i:], pattern) + if ind == -1 { + return -1 + } + return ind + i +} + +// lastIndexOf returns the last index of pattern in str, starting at str[i]. +func lastIndexOf(str string, pattern string, i int) int { + if i < 0 { + return -1 + } + if i >= len(str) { + return strings.LastIndex(str, pattern) + } + _, size := utf8.DecodeRuneInString(str[i:]) + return strings.LastIndex(str[:i+size], pattern) +} + +// runesIndexOf returns the index of pattern in target, starting at target[i]. +func runesIndexOf(target, pattern []rune, i int) int { + if i > len(target)-1 { + return -1 + } + if i <= 0 { + return runesIndex(target, pattern) + } + ind := runesIndex(target[i:], pattern) + if ind == -1 { + return -1 + } + return ind + i +} + +func runesEqual(r1, r2 []rune) bool { + if len(r1) != len(r2) { + return false + } + for i, c := range r1 { + if c != r2[i] { + return false + } + } + return true +} + +// runesIndex is the equivalent of strings.Index for rune slices. +func runesIndex(r1, r2 []rune) int { + last := len(r1) - len(r2) + for i := 0; i <= last; i++ { + if runesEqual(r1[i:i+len(r2)], r2) { + return i + } + } + return -1 +} diff --git a/vendor/github.com/shurcooL/highlight_diff/.travis.yml b/vendor/github.com/shurcooL/highlight_diff/.travis.yml new file mode 100644 index 0000000..93b1fcd --- /dev/null +++ b/vendor/github.com/shurcooL/highlight_diff/.travis.yml @@ -0,0 +1,16 @@ +sudo: false +language: go +go: + - 1.x + - master +matrix: + allow_failures: + - go: master + fast_finish: true +install: + - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d -s .) + - go tool vet . + - go test -v -race ./... 
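Before the vendored highlight packages begin, it may help to see how the diffmatchpatch pieces above compose: PatchMake builds patches from two texts (or a precomputed diff), PatchToText/PatchFromText serialise them in the GNU-diff-like format of Patch.String, and PatchApply replays them against a possibly drifted target. A minimal sketch, assuming the upstream import path (sample strings are illustrative):

```go
package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()

	// Build patches that turn text1 into text2.
	patches := dmp.PatchMake("The quick brown fox", "The slow brown fox")

	// Round-trip through the textual representation.
	serialised := dmp.PatchToText(patches)
	restored, err := dmp.PatchFromText(serialised)
	if err != nil {
		panic(err)
	}

	// PatchApply tolerates drift in the target text; the bool slice
	// reports which patches could actually be placed.
	result, applied := dmp.PatchApply(restored, "The quick brown fox jumps")
	fmt.Println(result, applied)
}
```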
diff --git a/vendor/github.com/shurcooL/highlight_diff/LICENSE b/vendor/github.com/shurcooL/highlight_diff/LICENSE new file mode 100644 index 0000000..ff0916e --- /dev/null +++ b/vendor/github.com/shurcooL/highlight_diff/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2014 Dmitri Shuralyov + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/shurcooL/highlight_diff/README.md b/vendor/github.com/shurcooL/highlight_diff/README.md new file mode 100644 index 0000000..2b38536 --- /dev/null +++ b/vendor/github.com/shurcooL/highlight_diff/README.md @@ -0,0 +1,22 @@ +highlight_diff +============== + +[![Build Status](https://travis-ci.org/shurcooL/highlight_diff.svg?branch=master)](https://travis-ci.org/shurcooL/highlight_diff) [![GoDoc](https://godoc.org/github.com/shurcooL/highlight_diff?status.svg)](https://godoc.org/github.com/shurcooL/highlight_diff) + +Package highlight_diff provides syntaxhighlight.Printer and syntaxhighlight.Annotator implementations +for diff format. It implements intra-block character-level inner diff highlighting. + +It uses GitHub Flavored Markdown .css class names "gi", "gd", "gu", "gh" for outer blocks, +"x" for inner emphasis blocks. + +Installation +------------ + +```bash +go get -u github.com/shurcooL/highlight_diff +``` + +License +------- + +- [MIT License](LICENSE) diff --git a/vendor/github.com/shurcooL/highlight_diff/main.go b/vendor/github.com/shurcooL/highlight_diff/main.go new file mode 100644 index 0000000..16732d5 --- /dev/null +++ b/vendor/github.com/shurcooL/highlight_diff/main.go @@ -0,0 +1,179 @@ +// Package highlight_diff provides syntaxhighlight.Printer and syntaxhighlight.Annotator implementations +// for diff format. It implements intra-block character-level inner diff highlighting. +// +// It uses GitHub Flavored Markdown .css class names "gi", "gd", "gu", "gh" for outer blocks, +// "x" for inner emphasis blocks. 
+package highlight_diff
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+	"text/template"
+
+	"github.com/sergi/go-diff/diffmatchpatch"
+	"github.com/sourcegraph/annotate"
+	"github.com/sourcegraph/syntaxhighlight"
+)
+
+var gfmDiff = HTMLConfig{
+	"",
+	"gi",
+	"gd",
+	"gu",
+	"gh",
+}
+
+func Print(s *Scanner, w io.Writer) error {
+	var p syntaxhighlight.Printer = HTMLPrinter(gfmDiff)
+
+	for s.Scan() {
+		tok, kind := s.Token()
+		err := p.Print(w, kind, string(tok))
+		if err != nil {
+			return err
+		}
+	}
+
+	return s.Err()
+}
+
+type HTMLConfig []string
+
+type HTMLPrinter HTMLConfig
+
+func (p HTMLPrinter) Print(w io.Writer, kind syntaxhighlight.Kind, tokText string) error {
+	class := HTMLConfig(p)[kind]
+	if class != "" {
+		_, err := w.Write([]byte(`<span class="` + class + `">`))
+		if err != nil {
+			return err
+		}
+	}
+	template.HTMLEscape(w, []byte(tokText))
+	if class != "" {
+		_, err := w.Write([]byte(`</span>`))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+type Scanner struct {
+	br   *bufio.Reader
+	line []byte
+}
+
+func NewScanner(src []byte) *Scanner {
+	r := bytes.NewReader(src)
+	return &Scanner{br: bufio.NewReader(r)}
+}
+
+func (s *Scanner) Scan() bool {
+	var err error
+	s.line, err = s.br.ReadBytes('\n')
+	return err == nil
+}
+
+func (s *Scanner) Token() ([]byte, syntaxhighlight.Kind) {
+	var kind syntaxhighlight.Kind
+	switch {
+	// The backslash is to detect "\ No newline at end of file" lines.
+	case len(s.line) == 0 || s.line[0] == ' ' || s.line[0] == '\\':
+		kind = 0
+	case s.line[0] == '+':
+		//kind = 1
+		kind = 0
+	case s.line[0] == '-':
+		//kind = 2
+		kind = 0
+	case s.line[0] == '@':
+		kind = 3
+	default:
+		kind = 4
+	}
+	return s.line, kind
+}
+
+func (s *Scanner) Err() error {
+	return nil
+}
+
+// ---
+
+type HTMLAnnotator HTMLConfig
+
+func (a HTMLAnnotator) Annotate(start int, kind syntaxhighlight.Kind, tokText string) (*annotate.Annotation, error) {
+	class := HTMLConfig(a)[kind]
+	if class != "" {
+		left := []byte(`<span class="`)
+		left = append(left, []byte(class)...)
+		left = append(left, []byte(`">`)...)
+		return &annotate.Annotation{
+			Start: start, End: start + len(tokText),
+			Left: left, Right: []byte("</span>"),
+		}, nil
+	}
+	return nil, nil
+}
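The pieces above compose into the package-level helpers that follow: NewScanner splits a diff into lines, Token classifies each line, and Print/Annotate wrap classified lines in span elements using the gfmDiff classes. A minimal sketch of driving the exported API (the sample hunk text is illustrative):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/shurcooL/highlight_diff"
)

func main() {
	src := []byte("@@ -1,2 +1,2 @@\n-old line\n+new line\n")

	// Print wraps each classified line in a <span> carrying its gfmDiff
	// class; note that in this vendored copy plain +/- lines are mapped
	// to the empty class, so only headers are wrapped.
	var buf bytes.Buffer
	if err := highlight_diff.Print(highlight_diff.NewScanner(src), &buf); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
}
```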
+
+func Annotate(src []byte) (annotate.Annotations, error) {
+	var a syntaxhighlight.Annotator = HTMLAnnotator(gfmDiff)
+	s := NewScanner(src)
+
+	var anns annotate.Annotations
+	read := 0
+	for s.Scan() {
+		tok, kind := s.Token()
+		ann, err := a.Annotate(read, kind, string(tok))
+		if err != nil {
+			return nil, err
+		}
+		read += len(tok)
+		if ann != nil {
+			anns = append(anns, ann)
+		}
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+
+	return anns, nil
+}
+
+// ---
+
+func HighlightedDiffFunc(leftContent, rightContent string, segments *[2][]*annotate.Annotation, offsets [2]int) {
+	dmp := diffmatchpatch.New()
+	diffs := dmp.DiffMain(leftContent, rightContent, true)
+
+	for side := range *segments {
+		offset := offsets[side]
+
+		for _, diff := range diffs {
+			if side == 0 && diff.Type == -1 {
+				(*segments)[side] = append((*segments)[side], &annotate.Annotation{Start: offset, End: offset + len(diff.Text), Left: []byte(`<span class="x">`), Right: []byte(`</span>`), WantInner: 1})
+				offset += len(diff.Text)
+			}
+			if side == 1 && diff.Type == +1 {
+				(*segments)[side] = append((*segments)[side], &annotate.Annotation{Start: offset, End: offset + len(diff.Text), Left: []byte(`<span class="x">`), Right: []byte(`</span>`), WantInner: 1})
+				offset += len(diff.Text)
+			}
+			if diff.Type == 0 {
+				offset += len(diff.Text)
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/shurcooL/highlight_go/.travis.yml b/vendor/github.com/shurcooL/highlight_go/.travis.yml
new file mode 100644
index 0000000..93b1fcd
--- /dev/null
+++ b/vendor/github.com/shurcooL/highlight_go/.travis.yml
@@ -0,0 +1,16 @@
+sudo: false
+language: go
+go:
+  - 1.x
+  - master
+matrix:
+  allow_failures:
+    - go: master
+  fast_finish: true
+install:
+  - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
+script:
+  - go get -t -v ./...
+  - diff -u <(echo -n) <(gofmt -d -s .)
+  - go tool vet .
+  - go test -v -race ./...
diff --git a/vendor/github.com/shurcooL/highlight_go/README.md b/vendor/github.com/shurcooL/highlight_go/README.md
new file mode 100644
index 0000000..5467995
--- /dev/null
+++ b/vendor/github.com/shurcooL/highlight_go/README.md
@@ -0,0 +1,18 @@
+highlight_go
+============
+
+[![Build Status](https://travis-ci.org/shurcooL/highlight_go.svg?branch=master)](https://travis-ci.org/shurcooL/highlight_go) [![GoDoc](https://godoc.org/github.com/shurcooL/highlight_go?status.svg)](https://godoc.org/github.com/shurcooL/highlight_go)
+
+Package highlight_go provides a syntax highlighter for Go, using go/scanner.
+
+Installation
+------------
+
+```bash
+go get -u github.com/shurcooL/highlight_go
+```
+
+License
+-------
+
+- [MIT License](https://opensource.org/licenses/mit-license.php)
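highlight_go is driven much like highlight_diff above. A sketch under the assumption that its exported Print takes the source bytes, a writer, and a syntaxhighlight.Printer (the printer construction here is an assumption; check the package's godoc for the exact signature):

```go
package main

import (
	"os"

	"github.com/shurcooL/highlight_go"
	"github.com/sourcegraph/syntaxhighlight"
)

func main() {
	src := []byte("package main\n\nfunc main() {}\n")

	// Assumed call shape: tokenize src with go/scanner and emit HTML
	// <span> elements using syntaxhighlight's default class config.
	err := highlight_go.Print(src, os.Stdout, syntaxhighlight.HTMLPrinter(syntaxhighlight.DefaultHTMLConfig))
	if err != nil {
		panic(err)
	}
}
```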
diff --git a/vendor/github.com/shurcooL/highlight_go/internal/go/LICENSE b/vendor/github.com/shurcooL/highlight_go/internal/go/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/vendor/github.com/shurcooL/highlight_go/internal/go/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/shurcooL/highlight_go/internal/go/scanner/errors.go b/vendor/github.com/shurcooL/highlight_go/internal/go/scanner/errors.go
new file mode 100644
index 0000000..2ebace7
--- /dev/null
+++ b/vendor/github.com/shurcooL/highlight_go/internal/go/scanner/errors.go
@@ -0,0 +1,125 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package scanner
+
+import (
+	"fmt"
+	"io"
+	"sort"
+
+	"github.com/shurcooL/highlight_go/internal/go/token"
+)
+
+// In an ErrorList, an error is represented by an *Error.
+// The position Pos, if valid, points to the beginning of
+// the offending token, and the error condition is described
+// by Msg.
+//
+type Error struct {
+	Pos token.Position
+	Msg string
+}
+
+// Error implements the error interface.
+func (e Error) Error() string {
+	if e.Pos.Filename != "" || e.Pos.IsValid() {
+		// don't print "<unknown position>"
+		// TODO(gri) reconsider the semantics of Position.IsValid
+		return e.Pos.String() + ": " + e.Msg
+	}
+	return e.Msg
+}
+
+// ErrorList is a list of *Errors.
+// The zero value for an ErrorList is an empty ErrorList ready to use.
+//
+type ErrorList []*Error
+
+// Add adds an Error with given position and error message to an ErrorList.
+func (p *ErrorList) Add(pos token.Position, msg string) {
+	*p = append(*p, &Error{pos, msg})
+}
+
+// Reset resets an ErrorList to no errors.
+func (p *ErrorList) Reset() { *p = (*p)[0:0] }
+
+// ErrorList implements the sort Interface.
+func (p ErrorList) Len() int      { return len(p) }
+func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p ErrorList) Less(i, j int) bool {
+	e := &p[i].Pos
+	f := &p[j].Pos
+	// Note that it is not sufficient to simply compare file offsets because
+	// the offsets do not reflect modified line information (through //line
+	// comments).
+	if e.Filename != f.Filename {
+		return e.Filename < f.Filename
+	}
+	if e.Line != f.Line {
+		return e.Line < f.Line
+	}
+	if e.Column != f.Column {
+		return e.Column < f.Column
+	}
+	return p[i].Msg < p[j].Msg
+}
+
+// Sort sorts an ErrorList. *Error entries are sorted by position,
+// other errors are sorted by error message, and before any *Error
+// entry.
+// +func (p ErrorList) Sort() { + sort.Sort(p) +} + +// RemoveMultiples sorts an ErrorList and removes all but the first error per line. +func (p *ErrorList) RemoveMultiples() { + sort.Sort(p) + var last token.Position // initial last.Line is != any legal error line + i := 0 + for _, e := range *p { + if e.Pos.Filename != last.Filename || e.Pos.Line != last.Line { + last = e.Pos + (*p)[i] = e + i++ + } + } + (*p) = (*p)[0:i] +} + +// An ErrorList implements the error interface. +func (p ErrorList) Error() string { + switch len(p) { + case 0: + return "no errors" + case 1: + return p[0].Error() + } + return fmt.Sprintf("%s (and %d more errors)", p[0], len(p)-1) +} + +// Err returns an error equivalent to this error list. +// If the list is empty, Err returns nil. +func (p ErrorList) Err() error { + if len(p) == 0 { + return nil + } + return p +} + +// PrintError is a utility function that prints a list of errors to w, +// one error per line, if the err parameter is an ErrorList. Otherwise +// it prints the err string. +// +func PrintError(w io.Writer, err error) { + if list, ok := err.(ErrorList); ok { + for _, e := range list { + fmt.Fprintf(w, "%s\n", e) + } + } else if err != nil { + fmt.Fprintf(w, "%s\n", err) + } +} diff --git a/vendor/github.com/shurcooL/highlight_go/internal/go/scanner/scanner.go b/vendor/github.com/shurcooL/highlight_go/internal/go/scanner/scanner.go new file mode 100644 index 0000000..f4e735a --- /dev/null +++ b/vendor/github.com/shurcooL/highlight_go/internal/go/scanner/scanner.go @@ -0,0 +1,850 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package scanner is a copy of go/scanner from Go 1.11.1 with +// https://golang.org/issue/28112 resolved (via https://golang.org/cl/141337). +// It is also modified to use a copy of go/token from Go 1.11.1. +package scanner + +import ( + "bytes" + "fmt" + "path/filepath" + "strconv" + "unicode" + "unicode/utf8" + + "github.com/shurcooL/highlight_go/internal/go/token" +) + +// An ErrorHandler may be provided to Scanner.Init. If a syntax error is +// encountered and a handler was installed, the handler is called with a +// position and an error message. The position points to the beginning of +// the offending token. +// +type ErrorHandler func(pos token.Position, msg string) + +// A Scanner holds the scanner's internal state while processing +// a given text. It can be allocated as part of another data +// structure but must be initialized via Init before use. +// +type Scanner struct { + // immutable state + file *token.File // source file handle + dir string // directory portion of file.Name() + src []byte // source + err ErrorHandler // error reporting; or nil + mode Mode // scanning mode + + // scanning state + ch rune // current character + offset int // character offset + rdOffset int // reading offset (position after current character) + lineOffset int // current line offset + insertSemi bool // insert a semicolon before next newline + + // public state - ok to modify + ErrorCount int // number of errors encountered +} + +const bom = 0xFEFF // byte order mark, only permitted as very first character + +// Read the next Unicode char into s.ch. +// s.ch < 0 means end-of-file. 
+// +func (s *Scanner) next() { + if s.rdOffset < len(s.src) { + s.offset = s.rdOffset + if s.ch == '\n' { + s.lineOffset = s.offset + s.file.AddLine(s.offset) + } + r, w := rune(s.src[s.rdOffset]), 1 + switch { + case r == 0: + s.error(s.offset, "illegal character NUL") + case r >= utf8.RuneSelf: + // not ASCII + r, w = utf8.DecodeRune(s.src[s.rdOffset:]) + if r == utf8.RuneError && w == 1 { + s.error(s.offset, "illegal UTF-8 encoding") + } else if r == bom && s.offset > 0 { + s.error(s.offset, "illegal byte order mark") + } + } + s.rdOffset += w + s.ch = r + } else { + s.offset = len(s.src) + if s.ch == '\n' { + s.lineOffset = s.offset + s.file.AddLine(s.offset) + } + s.ch = -1 // eof + } +} + +// peek returns the byte following the most recently read character without +// advancing the scanner. If the scanner is at EOF, peek returns 0. +func (s *Scanner) peek() byte { + if s.rdOffset < len(s.src) { + return s.src[s.rdOffset] + } + return 0 +} + +// A mode value is a set of flags (or 0). +// They control scanner behavior. +// +type Mode uint + +const ( + ScanComments Mode = 1 << iota // return comments as COMMENT tokens + dontInsertSemis // do not automatically insert semicolons - for testing only +) + +// Init prepares the scanner s to tokenize the text src by setting the +// scanner at the beginning of src. The scanner uses the file set file +// for position information and it adds line information for each line. +// It is ok to re-use the same file when re-scanning the same file as +// line information which is already present is ignored. Init causes a +// panic if the file size does not match the src size. +// +// Calls to Scan will invoke the error handler err if they encounter a +// syntax error and err is not nil. Also, for each error encountered, +// the Scanner field ErrorCount is incremented by one. The mode parameter +// determines how comments are handled. +// +// Note that Init may call err if there is an error in the first character +// of the file. +// +func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) { + // Explicitly initialize all fields since a scanner may be reused. 
+ if file.Size() != len(src) { + panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src))) + } + s.file = file + s.dir, _ = filepath.Split(file.Name()) + s.src = src + s.err = err + s.mode = mode + + s.ch = ' ' + s.offset = 0 + s.rdOffset = 0 + s.lineOffset = 0 + s.insertSemi = false + s.ErrorCount = 0 + + s.next() + if s.ch == bom { + s.next() // ignore BOM at file beginning + } +} + +func (s *Scanner) error(offs int, msg string) { + if s.err != nil { + s.err(s.file.Position(s.file.Pos(offs)), msg) + } + s.ErrorCount++ +} + +func (s *Scanner) scanComment() string { + // initial '/' already consumed; s.ch == '/' || s.ch == '*' + offs := s.offset - 1 // position of initial '/' + next := -1 // position immediately following the comment; < 0 means invalid comment + numCR := 0 + + if s.ch == '/' { + //-style comment + // (the final '\n' is not considered part of the comment) + s.next() + for s.ch != '\n' && s.ch >= 0 { + if s.ch == '\r' { + numCR++ + } + s.next() + } + // if we are at '\n', the position following the comment is afterwards + next = s.offset + if s.ch == '\n' { + next++ + } + goto exit + } + + /*-style comment */ + s.next() + for s.ch >= 0 { + ch := s.ch + if ch == '\r' { + numCR++ + } + s.next() + if ch == '*' && s.ch == '/' { + s.next() + next = s.offset + goto exit + } + } + + s.error(offs, "comment not terminated") + +exit: + lit := s.src[offs:s.offset] + + // On Windows, a (//-comment) line may end in "\r\n". + // Remove the final '\r' before analyzing the text for + // line directives (matching the compiler). Remove any + // other '\r' afterwards (matching the pre-existing be- + // havior of the scanner). + if numCR > 0 && len(lit) >= 2 && lit[1] == '/' && lit[len(lit)-1] == '\r' { + lit = lit[:len(lit)-1] + numCR-- + } + + // interpret line directives + // (//line directives must start at the beginning of the current line) + if next >= 0 /* implies valid comment */ && (lit[1] == '*' || offs == s.lineOffset) && bytes.HasPrefix(lit[2:], prefix) { + s.updateLineInfo(next, offs, lit) + } + + if numCR > 0 { + lit = stripCR(lit, lit[1] == '*') + } + + return string(lit) +} + +var prefix = []byte("line ") + +// updateLineInfo parses the incoming comment text at offset offs +// as a line directive. If successful, it updates the line info table +// for the position next per the line directive. +func (s *Scanner) updateLineInfo(next, offs int, text []byte) { + // extract comment text + if text[1] == '*' { + text = text[:len(text)-2] // lop off trailing "*/" + } + text = text[7:] // lop off leading "//line " or "/*line " + offs += 7 + + i, n, ok := trailingDigits(text) + if i == 0 { + return // ignore (not a line directive) + } + // i > 0 + + if !ok { + // text has a suffix :xxx but xxx is not a number + s.error(offs+i, "invalid line number: "+string(text[i:])) + return + } + + var line, col int + i2, n2, ok2 := trailingDigits(text[:i-1]) + if ok2 { + //line filename:line:col + i, i2 = i2, i + line, col = n2, n + if col == 0 { + s.error(offs+i2, "invalid column number: "+string(text[i2:])) + return + } + text = text[:i2-1] // lop off ":col" + } else { + //line filename:line + line = n + } + + if line == 0 { + s.error(offs+i, "invalid line number: "+string(text[i:])) + return + } + + // If we have a column (//line filename:line:col form), + // an empty filename means to use the previous filename. 
+ filename := string(text[:i-1]) // lop off ":line", and trim white space + if filename == "" && ok2 { + filename = s.file.Position(s.file.Pos(offs)).Filename + } else if filename != "" { + // Put a relative filename in the current directory. + // This is for compatibility with earlier releases. + // See issue 26671. + filename = filepath.Clean(filename) + if !filepath.IsAbs(filename) { + filename = filepath.Join(s.dir, filename) + } + } + + s.file.AddLineColumnInfo(next, filename, line, col) +} + +func trailingDigits(text []byte) (int, int, bool) { + i := bytes.LastIndexByte(text, ':') // look from right (Windows filenames may contain ':') + if i < 0 { + return 0, 0, false // no ":" + } + // i >= 0 + n, err := strconv.ParseUint(string(text[i+1:]), 10, 0) + return i + 1, int(n), err == nil +} + +func (s *Scanner) findLineEnd() bool { + // initial '/' already consumed + + defer func(offs int) { + // reset scanner state to where it was upon calling findLineEnd + s.ch = '/' + s.offset = offs + s.rdOffset = offs + 1 + s.next() // consume initial '/' again + }(s.offset - 1) + + // read ahead until a newline, EOF, or non-comment token is found + for s.ch == '/' || s.ch == '*' { + if s.ch == '/' { + //-style comment always contains a newline + return true + } + /*-style comment: look for newline */ + s.next() + for s.ch >= 0 { + ch := s.ch + if ch == '\n' { + return true + } + s.next() + if ch == '*' && s.ch == '/' { + s.next() + break + } + } + s.skipWhitespace() // s.insertSemi is set + if s.ch < 0 || s.ch == '\n' { + return true + } + if s.ch != '/' { + // non-comment token + return false + } + s.next() // consume '/' + } + + return false +} + +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch) +} + +func isDigit(ch rune) bool { + return '0' <= ch && ch <= '9' || ch >= utf8.RuneSelf && unicode.IsDigit(ch) +} + +func (s *Scanner) scanIdentifier() string { + offs := s.offset + for isLetter(s.ch) || isDigit(s.ch) { + s.next() + } + return string(s.src[offs:s.offset]) +} + +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} + +func (s *Scanner) scanMantissa(base int) { + for digitVal(s.ch) < base { + s.next() + } +} + +func (s *Scanner) scanNumber(seenDecimalPoint bool) (token.Token, string) { + // digitVal(s.ch) < 10 + offs := s.offset + tok := token.INT + + if seenDecimalPoint { + offs-- + tok = token.FLOAT + s.scanMantissa(10) + goto exponent + } + + if s.ch == '0' { + // int or float + offs := s.offset + s.next() + if s.ch == 'x' || s.ch == 'X' { + // hexadecimal int + s.next() + s.scanMantissa(16) + if s.offset-offs <= 2 { + // only scanned "0x" or "0X" + s.error(offs, "illegal hexadecimal number") + } + } else { + // octal int or float + seenDecimalDigit := false + s.scanMantissa(8) + if s.ch == '8' || s.ch == '9' { + // illegal octal int or float + seenDecimalDigit = true + s.scanMantissa(10) + } + if s.ch == '.' || s.ch == 'e' || s.ch == 'E' || s.ch == 'i' { + goto fraction + } + // octal int + if seenDecimalDigit { + s.error(offs, "illegal octal number") + } + } + goto exit + } + + // decimal int or float + s.scanMantissa(10) + +fraction: + if s.ch == '.' 
{ + tok = token.FLOAT + s.next() + s.scanMantissa(10) + } + +exponent: + if s.ch == 'e' || s.ch == 'E' { + tok = token.FLOAT + s.next() + if s.ch == '-' || s.ch == '+' { + s.next() + } + if digitVal(s.ch) < 10 { + s.scanMantissa(10) + } else { + s.error(offs, "illegal floating-point exponent") + } + } + + if s.ch == 'i' { + tok = token.IMAG + s.next() + } + +exit: + return tok, string(s.src[offs:s.offset]) +} + +// scanEscape parses an escape sequence where rune is the accepted +// escaped quote. In case of a syntax error, it stops at the offending +// character (without consuming it) and returns false. Otherwise +// it returns true. +func (s *Scanner) scanEscape(quote rune) bool { + offs := s.offset + + var n int + var base, max uint32 + switch s.ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote: + s.next() + return true + case '0', '1', '2', '3', '4', '5', '6', '7': + n, base, max = 3, 8, 255 + case 'x': + s.next() + n, base, max = 2, 16, 255 + case 'u': + s.next() + n, base, max = 4, 16, unicode.MaxRune + case 'U': + s.next() + n, base, max = 8, 16, unicode.MaxRune + default: + msg := "unknown escape sequence" + if s.ch < 0 { + msg = "escape sequence not terminated" + } + s.error(offs, msg) + return false + } + + var x uint32 + for n > 0 { + d := uint32(digitVal(s.ch)) + if d >= base { + msg := fmt.Sprintf("illegal character %#U in escape sequence", s.ch) + if s.ch < 0 { + msg = "escape sequence not terminated" + } + s.error(s.offset, msg) + return false + } + x = x*base + d + s.next() + n-- + } + + if x > max || 0xD800 <= x && x < 0xE000 { + s.error(offs, "escape sequence is invalid Unicode code point") + return false + } + + return true +} + +func (s *Scanner) scanRune() string { + // '\'' opening already consumed + offs := s.offset - 1 + + valid := true + n := 0 + for { + ch := s.ch + if ch == '\n' || ch < 0 { + // only report error if we don't have one already + if valid { + s.error(offs, "rune literal not terminated") + valid = false + } + break + } + s.next() + if ch == '\'' { + break + } + n++ + if ch == '\\' { + if !s.scanEscape('\'') { + valid = false + } + // continue to read to closing quote + } + } + + if valid && n != 1 { + s.error(offs, "illegal rune literal") + } + + return string(s.src[offs:s.offset]) +} + +func (s *Scanner) scanString() string { + // '"' opening already consumed + offs := s.offset - 1 + + for { + ch := s.ch + if ch == '\n' || ch < 0 { + s.error(offs, "string literal not terminated") + break + } + s.next() + if ch == '"' { + break + } + if ch == '\\' { + s.scanEscape('"') + } + } + + return string(s.src[offs:s.offset]) +} + +func stripCR(b []byte, comment bool) []byte { + c := make([]byte, len(b)) + i := 0 + for j, ch := range b { + // In a /*-style comment, don't strip \r from *\r/ (incl. + // sequences of \r from *\r\r...\r/) since the resulting + // */ would terminate the comment too early unless the \r + // is immediately following the opening /* in which case + // it's ok because /*/ is not closed yet (issue #11151). 
+ if ch != '\r' || comment && i > len("/*") && c[i-1] == '*' && j+1 < len(b) && b[j+1] == '/' { + c[i] = ch + i++ + } + } + return c[:i] +} + +func (s *Scanner) scanRawString() string { + // '`' opening already consumed + offs := s.offset - 1 + + hasCR := false + for { + ch := s.ch + if ch < 0 { + s.error(offs, "raw string literal not terminated") + break + } + s.next() + if ch == '`' { + break + } + if ch == '\r' { + hasCR = true + } + } + + lit := s.src[offs:s.offset] + if hasCR { + lit = stripCR(lit, false) + } + + return string(lit) +} + +func (s *Scanner) skipWhitespace() { + for s.ch == ' ' || s.ch == '\t' || s.ch == '\n' && !s.insertSemi || s.ch == '\r' { + s.next() + } +} + +// Helper functions for scanning multi-byte tokens such as >> += >>= . +// Different routines recognize different length tok_i based on matches +// of ch_i. If a token ends in '=', the result is tok1 or tok3 +// respectively. Otherwise, the result is tok0 if there was no other +// matching character, or tok2 if the matching character was ch2. + +func (s *Scanner) switch2(tok0, tok1 token.Token) token.Token { + if s.ch == '=' { + s.next() + return tok1 + } + return tok0 +} + +func (s *Scanner) switch3(tok0, tok1 token.Token, ch2 rune, tok2 token.Token) token.Token { + if s.ch == '=' { + s.next() + return tok1 + } + if s.ch == ch2 { + s.next() + return tok2 + } + return tok0 +} + +func (s *Scanner) switch4(tok0, tok1 token.Token, ch2 rune, tok2, tok3 token.Token) token.Token { + if s.ch == '=' { + s.next() + return tok1 + } + if s.ch == ch2 { + s.next() + if s.ch == '=' { + s.next() + return tok3 + } + return tok2 + } + return tok0 +} + +// Scan scans the next token and returns the token position, the token, +// and its literal string if applicable. The source end is indicated by +// token.EOF. +// +// If the returned token is a literal (token.IDENT, token.INT, token.FLOAT, +// token.IMAG, token.CHAR, token.STRING) or token.COMMENT, the literal string +// has the corresponding value. +// +// If the returned token is a keyword, the literal string is the keyword. +// +// If the returned token is token.SEMICOLON, the corresponding +// literal string is ";" if the semicolon was present in the source, +// and "\n" if the semicolon was inserted because of a newline or +// at EOF. +// +// If the returned token is token.ILLEGAL, the literal string is the +// offending character. +// +// In all other cases, Scan returns an empty literal string. +// +// For more tolerant parsing, Scan will return a valid token if +// possible even if a syntax error was encountered. Thus, even +// if the resulting token sequence contains no illegal tokens, +// a client may not assume that no error occurred. Instead it +// must check the scanner's ErrorCount or the number of calls +// of the error handler, if there was one installed. +// +// Scan adds line information to the file added to the file +// set with Init. Token positions are relative to that file +// and thus relative to the file set. 
+// +func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) { +scanAgain: + s.skipWhitespace() + + // current token start + pos = s.file.Pos(s.offset) + + // determine token value + insertSemi := false + switch ch := s.ch; { + case isLetter(ch): + lit = s.scanIdentifier() + if len(lit) > 1 { + // keywords are longer than one letter - avoid lookup otherwise + tok = token.Lookup(lit) + switch tok { + case token.IDENT, token.BREAK, token.CONTINUE, token.FALLTHROUGH, token.RETURN: + insertSemi = true + } + } else { + insertSemi = true + tok = token.IDENT + } + case '0' <= ch && ch <= '9': + insertSemi = true + tok, lit = s.scanNumber(false) + default: + s.next() // always make progress + switch ch { + case -1: + if s.insertSemi { + s.insertSemi = false // EOF consumed + return pos, token.SEMICOLON, "\n" + } + tok = token.EOF + case '\n': + // we only reach here if s.insertSemi was + // set in the first place and exited early + // from s.skipWhitespace() + s.insertSemi = false // newline consumed + return pos, token.SEMICOLON, "\n" + case '"': + insertSemi = true + tok = token.STRING + lit = s.scanString() + case '\'': + insertSemi = true + tok = token.CHAR + lit = s.scanRune() + case '`': + insertSemi = true + tok = token.STRING + lit = s.scanRawString() + case ':': + tok = s.switch2(token.COLON, token.DEFINE) + case '.': + if '0' <= s.ch && s.ch <= '9' { + insertSemi = true + tok, lit = s.scanNumber(true) + } else { + tok = token.PERIOD + if s.ch == '.' && s.peek() == '.' { + s.next() + s.next() // consume last '.' + tok = token.ELLIPSIS + } + } + case ',': + tok = token.COMMA + case ';': + tok = token.SEMICOLON + lit = ";" + case '(': + tok = token.LPAREN + case ')': + insertSemi = true + tok = token.RPAREN + case '[': + tok = token.LBRACK + case ']': + insertSemi = true + tok = token.RBRACK + case '{': + tok = token.LBRACE + case '}': + insertSemi = true + tok = token.RBRACE + case '+': + tok = s.switch3(token.ADD, token.ADD_ASSIGN, '+', token.INC) + if tok == token.INC { + insertSemi = true + } + case '-': + tok = s.switch3(token.SUB, token.SUB_ASSIGN, '-', token.DEC) + if tok == token.DEC { + insertSemi = true + } + case '*': + tok = s.switch2(token.MUL, token.MUL_ASSIGN) + case '/': + if s.ch == '/' || s.ch == '*' { + // comment + if s.insertSemi && s.findLineEnd() { + // reset position to the beginning of the comment + s.ch = '/' + s.offset = s.file.Offset(pos) + s.rdOffset = s.offset + 1 + s.insertSemi = false // newline consumed + return pos, token.SEMICOLON, "\n" + } + comment := s.scanComment() + if s.mode&ScanComments == 0 { + // skip comment + s.insertSemi = false // newline consumed + goto scanAgain + } + tok = token.COMMENT + lit = comment + } else { + tok = s.switch2(token.QUO, token.QUO_ASSIGN) + } + case '%': + tok = s.switch2(token.REM, token.REM_ASSIGN) + case '^': + tok = s.switch2(token.XOR, token.XOR_ASSIGN) + case '<': + if s.ch == '-' { + s.next() + tok = token.ARROW + } else { + tok = s.switch4(token.LSS, token.LEQ, '<', token.SHL, token.SHL_ASSIGN) + } + case '>': + tok = s.switch4(token.GTR, token.GEQ, '>', token.SHR, token.SHR_ASSIGN) + case '=': + tok = s.switch2(token.ASSIGN, token.EQL) + case '!': + tok = s.switch2(token.NOT, token.NEQ) + case '&': + if s.ch == '^' { + s.next() + tok = s.switch2(token.AND_NOT, token.AND_NOT_ASSIGN) + } else { + tok = s.switch3(token.AND, token.AND_ASSIGN, '&', token.LAND) + } + case '|': + tok = s.switch3(token.OR, token.OR_ASSIGN, '|', token.LOR) + default: + // next reports unexpected BOMs - don't repeat + if ch 
!= bom { + s.error(s.file.Offset(pos), fmt.Sprintf("illegal character %#U", ch)) + } + insertSemi = s.insertSemi // preserve insertSemi info + tok = token.ILLEGAL + lit = string(ch) + } + } + if s.mode&dontInsertSemis == 0 { + s.insertSemi = insertSemi + } + + return +} diff --git a/vendor/github.com/shurcooL/highlight_go/internal/go/token/position.go b/vendor/github.com/shurcooL/highlight_go/internal/go/token/position.go new file mode 100644 index 0000000..241133f --- /dev/null +++ b/vendor/github.com/shurcooL/highlight_go/internal/go/token/position.go @@ -0,0 +1,515 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package token + +import ( + "fmt" + "sort" + "sync" +) + +// ----------------------------------------------------------------------------- +// Positions + +// Position describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. +// +type Position struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (byte count) +} + +// IsValid reports whether the position is valid. +func (pos *Position) IsValid() bool { return pos.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// file:line valid position with file name but no column (column == 0) +// line:column valid position without file name +// line valid position without file name and no column (column == 0) +// file invalid position with file name +// - invalid position without file name +// +func (pos Position) String() string { + s := pos.Filename + if pos.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d", pos.Line) + if pos.Column != 0 { + s += fmt.Sprintf(":%d", pos.Column) + } + } + if s == "" { + s = "-" + } + return s +} + +// Pos is a compact encoding of a source position within a file set. +// It can be converted into a Position for a more convenient, but much +// larger, representation. +// +// The Pos value for a given file is a number in the range [base, base+size], +// where base and size are specified when adding the file to the file set via +// AddFile. +// +// To create the Pos value for a specific source offset (measured in bytes), +// first add the respective file to the current file set using FileSet.AddFile +// and then call File.Pos(offset) for that file. Given a Pos value p +// for a specific file set fset, the corresponding Position value is +// obtained by calling fset.Position(p). +// +// Pos values can be compared directly with the usual comparison operators: +// If two Pos values p and q are in the same file, comparing p and q is +// equivalent to comparing the respective source file offsets. If p and q +// are in different files, p < q is true if the file implied by p was added +// to the respective file set before the file implied by q. +// +type Pos int + +// The zero value for Pos is NoPos; there is no file and line information +// associated with it, and NoPos.IsValid() is false. NoPos is always +// smaller than any other Pos value. The corresponding Position value +// for NoPos is the zero value for Position. +// +const NoPos Pos = 0 + +// IsValid reports whether the position is valid. 
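+//
+// Illustrative sketch (not part of the upstream go/token source): converting
+// between byte offsets, Pos values, and Positions.
+//
+//	fset := token.NewFileSet()
+//	f := fset.AddFile("example.go", fset.Base(), 100) // a 100-byte file
+//	p := f.Pos(42)                     // Pos for byte offset 42
+//	fmt.Println(p.IsValid())           // true
+//	fmt.Println(token.NoPos.IsValid()) // false
+//	fmt.Println(fset.Position(p))      // "example.go:1:43" (no lines added yet)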
+func (p Pos) IsValid() bool { + return p != NoPos +} + +// ----------------------------------------------------------------------------- +// File + +// A File is a handle for a file belonging to a FileSet. +// A File has a name, size, and line offset table. +// +type File struct { + set *FileSet + name string // file name as provided to AddFile + base int // Pos value range for this file is [base...base+size] + size int // file size as provided to AddFile + + // lines and infos are protected by mutex + mutex sync.Mutex + lines []int // lines contains the offset of the first character for each line (the first entry is always 0) + infos []lineInfo +} + +// Name returns the file name of file f as registered with AddFile. +func (f *File) Name() string { + return f.name +} + +// Base returns the base offset of file f as registered with AddFile. +func (f *File) Base() int { + return f.base +} + +// Size returns the size of file f as registered with AddFile. +func (f *File) Size() int { + return f.size +} + +// LineCount returns the number of lines in file f. +func (f *File) LineCount() int { + f.mutex.Lock() + n := len(f.lines) + f.mutex.Unlock() + return n +} + +// AddLine adds the line offset for a new line. +// The line offset must be larger than the offset for the previous line +// and smaller than the file size; otherwise the line offset is ignored. +// +func (f *File) AddLine(offset int) { + f.mutex.Lock() + if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset < f.size { + f.lines = append(f.lines, offset) + } + f.mutex.Unlock() +} + +// MergeLine merges a line with the following line. It is akin to replacing +// the newline character at the end of the line with a space (to not change the +// remaining offsets). To obtain the line number, consult e.g. Position.Line. +// MergeLine will panic if given an invalid line number. +// +func (f *File) MergeLine(line int) { + if line <= 0 { + panic("illegal line number (line numbering starts at 1)") + } + f.mutex.Lock() + defer f.mutex.Unlock() + if line >= len(f.lines) { + panic("illegal line number") + } + // To merge the line numbered <line> with the line numbered <line+1>, + // we need to remove the entry in lines corresponding to the line + // numbered <line+1>. The entry in lines corresponding to the line + // numbered <line+1> is located at index <line>, since indices in lines + // are 0-based and line numbers are 1-based. + copy(f.lines[line:], f.lines[line+1:]) + f.lines = f.lines[:len(f.lines)-1] +} + +// SetLines sets the line offsets for a file and reports whether it succeeded. +// The line offsets are the offsets of the first character of each line; +// for instance for the content "ab\nc\n" the line offsets are {0, 3}. +// An empty file has an empty line offset table. +// Each line offset must be larger than the offset for the previous line +// and smaller than the file size; otherwise SetLines fails and returns +// false. +// Callers must not mutate the provided slice after SetLines returns. +// +func (f *File) SetLines(lines []int) bool { + // verify validity of lines table + size := f.size + for i, offset := range lines { + if i > 0 && offset <= lines[i-1] || size <= offset { + return false + } + } + + // set lines table + f.mutex.Lock() + f.lines = lines + f.mutex.Unlock() + return true +} + +// SetLinesForContent sets the line offsets for the given file content. +// It ignores position-altering //line comments.
+func (f *File) SetLinesForContent(content []byte) { + var lines []int + line := 0 + for offset, b := range content { + if line >= 0 { + lines = append(lines, line) + } + line = -1 + if b == '\n' { + line = offset + 1 + } + } + + // set lines table + f.mutex.Lock() + f.lines = lines + f.mutex.Unlock() +} + +// A lineInfo object describes alternative file, line, and column +// number information (such as provided via a //line directive) +// for a given file offset. +type lineInfo struct { + // fields are exported to make them accessible to gob + Offset int + Filename string + Line, Column int +} + +// AddLineInfo is like AddLineColumnInfo with a column = 1 argument. +// It is here for backward-compatibility for code prior to Go 1.11. +// +func (f *File) AddLineInfo(offset int, filename string, line int) { + f.AddLineColumnInfo(offset, filename, line, 1) +} + +// AddLineColumnInfo adds alternative file, line, and column number +// information for a given file offset. The offset must be larger +// than the offset for the previously added alternative line info +// and smaller than the file size; otherwise the information is +// ignored. +// +// AddLineColumnInfo is typically used to register alternative position +// information for line directives such as //line filename:line:column. +// +func (f *File) AddLineColumnInfo(offset int, filename string, line, column int) { + f.mutex.Lock() + if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size { + f.infos = append(f.infos, lineInfo{offset, filename, line, column}) + } + f.mutex.Unlock() +} + +// Pos returns the Pos value for the given file offset; +// the offset must be <= f.Size(). +// f.Pos(f.Offset(p)) == p. +// +func (f *File) Pos(offset int) Pos { + if offset > f.size { + panic("illegal file offset") + } + return Pos(f.base + offset) +} + +// Offset returns the offset for the given file position p; +// p must be a valid Pos value in that file. +// f.Offset(f.Pos(offset)) == offset. +// +func (f *File) Offset(p Pos) int { + if int(p) < f.base || int(p) > f.base+f.size { + panic("illegal Pos value") + } + return int(p) - f.base +} + +// Line returns the line number for the given file position p; +// p must be a Pos value in that file or NoPos. +// +func (f *File) Line(p Pos) int { + return f.Position(p).Line +} + +func searchLineInfos(a []lineInfo, x int) int { + return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1 +} + +// unpack returns the filename and line and column number for a file offset. +// If adjusted is set, unpack will return the filename and line information +// possibly adjusted by //line comments; otherwise those comments are ignored. 
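+//
+// For example (an illustrative note, not in the upstream source): a
+// directive such as
+//
+//	//line other.go:10
+//
+// recorded through AddLineColumnInfo causes unpack, when adjusted is true,
+// to report subsequent offsets as positions in other.go starting at line 10.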
+// +func (f *File) unpack(offset int, adjusted bool) (filename string, line, column int) { + f.mutex.Lock() + defer f.mutex.Unlock() + filename = f.name + if i := searchInts(f.lines, offset); i >= 0 { + line, column = i+1, offset-f.lines[i]+1 + } + if adjusted && len(f.infos) > 0 { + // few files have extra line infos + if i := searchLineInfos(f.infos, offset); i >= 0 { + alt := &f.infos[i] + filename = alt.Filename + if i := searchInts(f.lines, alt.Offset); i >= 0 { + // i+1 is the line at which the alternative position was recorded + d := line - (i + 1) // line distance from alternative position base + line = alt.Line + d + if alt.Column == 0 { + // alternative column is unknown => relative column is unknown + // (the current specification for line directives requires + // this to apply until the next PosBase/line directive, + // not just until the new newline) + column = 0 + } else if d == 0 { + // the alternative position base is on the current line + // => column is relative to alternative column + column = alt.Column + (offset - alt.Offset) + } + } + } + } + return +} + +func (f *File) position(p Pos, adjusted bool) (pos Position) { + offset := int(p) - f.base + pos.Offset = offset + pos.Filename, pos.Line, pos.Column = f.unpack(offset, adjusted) + return +} + +// PositionFor returns the Position value for the given file position p. +// If adjusted is set, the position may be adjusted by position-altering +// //line comments; otherwise those comments are ignored. +// p must be a Pos value in f or NoPos. +// +func (f *File) PositionFor(p Pos, adjusted bool) (pos Position) { + if p != NoPos { + if int(p) < f.base || int(p) > f.base+f.size { + panic("illegal Pos value") + } + pos = f.position(p, adjusted) + } + return +} + +// Position returns the Position value for the given file position p. +// Calling f.Position(p) is equivalent to calling f.PositionFor(p, true). +// +func (f *File) Position(p Pos) (pos Position) { + return f.PositionFor(p, true) +} + +// ----------------------------------------------------------------------------- +// FileSet + +// A FileSet represents a set of source files. +// Methods of file sets are synchronized; multiple goroutines +// may invoke them concurrently. +// +type FileSet struct { + mutex sync.RWMutex // protects the file set + base int // base offset for the next file + files []*File // list of files in the order added to the set + last *File // cache of last file looked up +} + +// NewFileSet creates a new file set. +func NewFileSet() *FileSet { + return &FileSet{ + base: 1, // 0 == NoPos + } +} + +// Base returns the minimum base offset that must be provided to +// AddFile when adding the next file. +// +func (s *FileSet) Base() int { + s.mutex.RLock() + b := s.base + s.mutex.RUnlock() + return b + +} + +// AddFile adds a new file with a given filename, base offset, and file size +// to the file set s and returns the file. Multiple files may have the same +// name. The base offset must not be smaller than the FileSet's Base(), and +// size must not be negative. As a special case, if a negative base is provided, +// the current value of the FileSet's Base() is used instead. +// +// Adding the file will set the file set's Base() value to base + size + 1 +// as the minimum base value for the next file. The following relationship +// exists between a Pos value p for a given file offset offs: +// +// int(p) = base + offs +// +// with offs in the range [0, size] and thus p in the range [base, base+size]. 
+// For convenience, File.Pos may be used to create file-specific position +// values from a file offset. +// +func (s *FileSet) AddFile(filename string, base, size int) *File { + s.mutex.Lock() + defer s.mutex.Unlock() + if base < 0 { + base = s.base + } + if base < s.base || size < 0 { + panic("illegal base or size") + } + // base >= s.base && size >= 0 + f := &File{set: s, name: filename, base: base, size: size, lines: []int{0}} + base += size + 1 // +1 because EOF also has a position + if base < 0 { + panic("token.Pos offset overflow (> 2G of source code in file set)") + } + // add the file to the file set + s.base = base + s.files = append(s.files, f) + s.last = f + return f +} + +// Iterate calls f for the files in the file set in the order they were added +// until f returns false. +// +func (s *FileSet) Iterate(f func(*File) bool) { + for i := 0; ; i++ { + var file *File + s.mutex.RLock() + if i < len(s.files) { + file = s.files[i] + } + s.mutex.RUnlock() + if file == nil || !f(file) { + break + } + } +} + +func searchFiles(a []*File, x int) int { + return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1 +} + +func (s *FileSet) file(p Pos) *File { + s.mutex.RLock() + // common case: p is in last file + if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size { + s.mutex.RUnlock() + return f + } + // p is not in last file - search all files + if i := searchFiles(s.files, int(p)); i >= 0 { + f := s.files[i] + // f.base <= int(p) by definition of searchFiles + if int(p) <= f.base+f.size { + s.mutex.RUnlock() + s.mutex.Lock() + s.last = f // race is ok - s.last is only a cache + s.mutex.Unlock() + return f + } + } + s.mutex.RUnlock() + return nil +} + +// File returns the file that contains the position p. +// If no such file is found (for instance for p == NoPos), +// the result is nil. +// +func (s *FileSet) File(p Pos) (f *File) { + if p != NoPos { + f = s.file(p) + } + return +} + +// PositionFor converts a Pos p in the fileset into a Position value. +// If adjusted is set, the position may be adjusted by position-altering +// //line comments; otherwise those comments are ignored. +// p must be a Pos value in s or NoPos. +// +func (s *FileSet) PositionFor(p Pos, adjusted bool) (pos Position) { + if p != NoPos { + if f := s.file(p); f != nil { + return f.position(p, adjusted) + } + } + return +} + +// Position converts a Pos p in the fileset into a Position value. +// Calling s.Position(p) is equivalent to calling s.PositionFor(p, true). +// +func (s *FileSet) Position(p Pos) (pos Position) { + return s.PositionFor(p, true) +} + +// ----------------------------------------------------------------------------- +// Helper functions + +func searchInts(a []int, x int) int { + // This function body is a manually inlined version of: + // + // return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1 + // + // With better compiler optimizations, this may not be needed in the + // future, but at the moment this change improves the go/printer + // benchmark performance by ~30%. This has a direct impact on the + // speed of gofmt and thus seems worthwhile (2011-04-29). + // TODO(gri): Remove this when compilers have caught up. 
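+ //
+ // Note (added comment): the loop below maintains the invariant that
+ // a[k] <= x for all k < i and a[k] > x for all k >= j. It terminates
+ // with i == j and returns i-1, the largest index with a[i-1] <= x,
+ // or -1 if there is no such index.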
+ i, j := 0, len(a) + for i < j { + h := i + (j-i)/2 // avoid overflow when computing h + // i ≤ h < j + if a[h] <= x { + i = h + 1 + } else { + j = h + } + } + return i - 1 +} diff --git a/vendor/github.com/shurcooL/highlight_go/internal/go/token/serialize.go b/vendor/github.com/shurcooL/highlight_go/internal/go/token/serialize.go new file mode 100644 index 0000000..d0ea345 --- /dev/null +++ b/vendor/github.com/shurcooL/highlight_go/internal/go/token/serialize.go @@ -0,0 +1,71 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package token + +type serializedFile struct { + // fields correspond 1:1 to fields with same (lower-case) name in File + Name string + Base int + Size int + Lines []int + Infos []lineInfo +} + +type serializedFileSet struct { + Base int + Files []serializedFile +} + +// Read calls decode to deserialize a file set into s; s must not be nil. +func (s *FileSet) Read(decode func(interface{}) error) error { + var ss serializedFileSet + if err := decode(&ss); err != nil { + return err + } + + s.mutex.Lock() + s.base = ss.Base + files := make([]*File, len(ss.Files)) + for i := 0; i < len(ss.Files); i++ { + f := &ss.Files[i] + files[i] = &File{ + set: s, + name: f.Name, + base: f.Base, + size: f.Size, + lines: f.Lines, + infos: f.Infos, + } + } + s.files = files + s.last = nil + s.mutex.Unlock() + + return nil +} + +// Write calls encode to serialize the file set s. +func (s *FileSet) Write(encode func(interface{}) error) error { + var ss serializedFileSet + + s.mutex.Lock() + ss.Base = s.base + files := make([]serializedFile, len(s.files)) + for i, f := range s.files { + f.mutex.Lock() + files[i] = serializedFile{ + Name: f.name, + Base: f.base, + Size: f.size, + Lines: append([]int(nil), f.lines...), + Infos: append([]lineInfo(nil), f.infos...), + } + f.mutex.Unlock() + } + ss.Files = files + s.mutex.Unlock() + + return encode(ss) +} diff --git a/vendor/github.com/shurcooL/highlight_go/internal/go/token/token.go b/vendor/github.com/shurcooL/highlight_go/internal/go/token/token.go new file mode 100644 index 0000000..b6a26f6 --- /dev/null +++ b/vendor/github.com/shurcooL/highlight_go/internal/go/token/token.go @@ -0,0 +1,253 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package token is a copy of go/token from Go 1.11.1. +// It makes its Token a type alias of "go/token".Token. +package token + +import "go/token" + +// Token is the set of lexical tokens of the Go programming language. +type Token = token.Token + +// The list of tokens. +const ( + // Special tokens + ILLEGAL Token = iota + EOF + COMMENT + + literal_beg + // Identifiers and basic type literals + // (these tokens stand for classes of literals) + IDENT // main + INT // 12345 + FLOAT // 123.45 + IMAG // 123.45i + CHAR // 'a' + STRING // "abc" + literal_end + + operator_beg + // Operators and delimiters + ADD // + + SUB // - + MUL // * + QUO // / + REM // % + + AND // & + OR // | + XOR // ^ + SHL // << + SHR // >> + AND_NOT // &^ + + ADD_ASSIGN // += + SUB_ASSIGN // -= + MUL_ASSIGN // *= + QUO_ASSIGN // /= + REM_ASSIGN // %= + + AND_ASSIGN // &= + OR_ASSIGN // |= + XOR_ASSIGN // ^= + SHL_ASSIGN // <<= + SHR_ASSIGN // >>= + AND_NOT_ASSIGN // &^= + + LAND // && + LOR // || + ARROW // <- + INC // ++ + DEC // -- + + EQL // == + LSS // < + GTR // > + ASSIGN // = + NOT // ! 
+ + NEQ // != + LEQ // <= + GEQ // >= + DEFINE // := + ELLIPSIS // ... + + LPAREN // ( + LBRACK // [ + LBRACE // { + COMMA // , + PERIOD // . + + RPAREN // ) + RBRACK // ] + RBRACE // } + SEMICOLON // ; + COLON // : + operator_end + + keyword_beg + // Keywords + BREAK + CASE + CHAN + CONST + CONTINUE + + DEFAULT + DEFER + ELSE + FALLTHROUGH + FOR + + FUNC + GO + GOTO + IF + IMPORT + + INTERFACE + MAP + PACKAGE + RANGE + RETURN + + SELECT + STRUCT + SWITCH + TYPE + VAR + keyword_end +) + +var tokens = [...]string{ + ILLEGAL: "ILLEGAL", + + EOF: "EOF", + COMMENT: "COMMENT", + + IDENT: "IDENT", + INT: "INT", + FLOAT: "FLOAT", + IMAG: "IMAG", + CHAR: "CHAR", + STRING: "STRING", + + ADD: "+", + SUB: "-", + MUL: "*", + QUO: "/", + REM: "%", + + AND: "&", + OR: "|", + XOR: "^", + SHL: "<<", + SHR: ">>", + AND_NOT: "&^", + + ADD_ASSIGN: "+=", + SUB_ASSIGN: "-=", + MUL_ASSIGN: "*=", + QUO_ASSIGN: "/=", + REM_ASSIGN: "%=", + + AND_ASSIGN: "&=", + OR_ASSIGN: "|=", + XOR_ASSIGN: "^=", + SHL_ASSIGN: "<<=", + SHR_ASSIGN: ">>=", + AND_NOT_ASSIGN: "&^=", + + LAND: "&&", + LOR: "||", + ARROW: "<-", + INC: "++", + DEC: "--", + + EQL: "==", + LSS: "<", + GTR: ">", + ASSIGN: "=", + NOT: "!", + + NEQ: "!=", + LEQ: "<=", + GEQ: ">=", + DEFINE: ":=", + ELLIPSIS: "...", + + LPAREN: "(", + LBRACK: "[", + LBRACE: "{", + COMMA: ",", + PERIOD: ".", + + RPAREN: ")", + RBRACK: "]", + RBRACE: "}", + SEMICOLON: ";", + COLON: ":", + + BREAK: "break", + CASE: "case", + CHAN: "chan", + CONST: "const", + CONTINUE: "continue", + + DEFAULT: "default", + DEFER: "defer", + ELSE: "else", + FALLTHROUGH: "fallthrough", + FOR: "for", + + FUNC: "func", + GO: "go", + GOTO: "goto", + IF: "if", + IMPORT: "import", + + INTERFACE: "interface", + MAP: "map", + PACKAGE: "package", + RANGE: "range", + RETURN: "return", + + SELECT: "select", + STRUCT: "struct", + SWITCH: "switch", + TYPE: "type", + VAR: "var", +} + +// A set of constants for precedence-based expression parsing. +// Non-operators have lowest precedence, followed by operators +// starting with precedence 1 up to unary operators. The highest +// precedence serves as "catch-all" precedence for selector, +// indexing, and other operator and delimiter tokens. +// +const ( + LowestPrec = 0 // non-operators + UnaryPrec = 6 + HighestPrec = 7 +) + +var keywords map[string]Token + +func init() { + keywords = make(map[string]Token) + for i := keyword_beg + 1; i < keyword_end; i++ { + keywords[tokens[i]] = i + } +} + +// Lookup maps an identifier to its keyword token or IDENT (if not a keyword). +// +func Lookup(ident string) Token { + if tok, is_keyword := keywords[ident]; is_keyword { + return tok + } + return IDENT +} diff --git a/vendor/github.com/shurcooL/highlight_go/main.go b/vendor/github.com/shurcooL/highlight_go/main.go new file mode 100644 index 0000000..1ce7331 --- /dev/null +++ b/vendor/github.com/shurcooL/highlight_go/main.go @@ -0,0 +1,122 @@ +// Package highlight_go provides a syntax highlighter for Go, using go/scanner. +package highlight_go + +import ( + "io" + + "github.com/shurcooL/highlight_go/internal/go/scanner" + "github.com/shurcooL/highlight_go/internal/go/token" + + "github.com/sourcegraph/annotate" + "github.com/sourcegraph/syntaxhighlight" +) + +// TODO: Stop using internal copies of go/scanner and go/token in Go 1.12. + +// TokenKind returns a syntaxhighlight token kind value for the given tok and lit. 
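+//
+// For example, keywords and the operators through token.ELLIPSIS (which
+// include := and ...) map to syntaxhighlight.Keyword, token.STRING maps to
+// syntaxhighlight.String, and anything unrecognized falls back to
+// syntaxhighlight.Plaintext.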
+func TokenKind(tok token.Token, lit string) syntaxhighlight.Kind { + switch { + case tok.IsKeyword() || (tok.IsOperator() && tok <= token.ELLIPSIS): + return syntaxhighlight.Keyword + + // Literals. + case tok == token.INT || tok == token.FLOAT || tok == token.IMAG || tok == token.CHAR: + return syntaxhighlight.Decimal + case tok == token.STRING: + return syntaxhighlight.String + case lit == "true" || lit == "false" || lit == "iota" || lit == "nil": + return syntaxhighlight.Literal + + case tok == token.COMMENT: + return syntaxhighlight.Comment + default: + return syntaxhighlight.Plaintext + } +} + +func Print(src []byte, w io.Writer, p syntaxhighlight.Printer) error { + var s scanner.Scanner + fset := token.NewFileSet() + file := fset.AddFile("", fset.Base(), len(src)) + s.Init(file, src, nil, scanner.ScanComments) + + prevEndOffset := 0 + + for { + pos, tok, lit := s.Scan() + if tok == token.EOF { + break + } + if tok == token.SEMICOLON && lit == "\n" { + continue + } + + offset := fset.Position(pos).Offset + + // Print whitespace between previous token and current token, if any. + if prevEndOffset < offset { + err := p.Print(w, syntaxhighlight.Whitespace, string(src[prevEndOffset:offset])) + if err != nil { + return err + } + } + + text := tokenText(tok, lit) + + // Print token. + err := p.Print(w, TokenKind(tok, lit), text) + if err != nil { + return err + } + + prevEndOffset = offset + len(text) + } + + // Print final whitespace between last token and EOF, if any. + if prevEndOffset < len(src) { + err := p.Print(w, syntaxhighlight.Whitespace, string(src[prevEndOffset:])) + if err != nil { + return err + } + } + + return nil +} + +func Annotate(src []byte, a syntaxhighlight.Annotator) (annotate.Annotations, error) { + var s scanner.Scanner + fset := token.NewFileSet() + file := fset.AddFile("", fset.Base(), len(src)) + s.Init(file, src, nil, scanner.ScanComments) + + var anns annotate.Annotations + + for { + pos, tok, lit := s.Scan() + if tok == token.EOF { + break + } + if tok == token.SEMICOLON && lit == "\n" { + continue + } + + // Annotate token. + ann, err := a.Annotate(fset.Position(pos).Offset, TokenKind(tok, lit), tokenText(tok, lit)) + if err != nil { + return nil, err + } + if ann == nil { + continue + } + anns = append(anns, ann) + } + + return anns, nil +} + +func tokenText(tok token.Token, lit string) string { + if lit == "" { + return tok.String() + } + return lit +} diff --git a/vendor/github.com/shurcooL/octicon/.travis.yml b/vendor/github.com/shurcooL/octicon/.travis.yml new file mode 100644 index 0000000..93b1fcd --- /dev/null +++ b/vendor/github.com/shurcooL/octicon/.travis.yml @@ -0,0 +1,16 @@ +sudo: false +language: go +go: + - 1.x + - master +matrix: + allow_failures: + - go: master + fast_finish: true +install: + - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d -s .) + - go tool vet . + - go test -v -race ./... diff --git a/vendor/github.com/shurcooL/octicon/LICENSE b/vendor/github.com/shurcooL/octicon/LICENSE new file mode 100644 index 0000000..84256b3 --- /dev/null +++ b/vendor/github.com/shurcooL/octicon/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 GitHub Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/shurcooL/octicon/README.md b/vendor/github.com/shurcooL/octicon/README.md new file mode 100644 index 0000000..2ed126b --- /dev/null +++ b/vendor/github.com/shurcooL/octicon/README.md @@ -0,0 +1,18 @@ +octicon +======= + +[![Build Status](https://travis-ci.org/shurcooL/octicon.svg?branch=master)](https://travis-ci.org/shurcooL/octicon) [![GoDoc](https://godoc.org/github.com/shurcooL/octicon?status.svg)](https://godoc.org/github.com/shurcooL/octicon) + +Package octicon provides GitHub Octicons. + +Installation +------------ + +```bash +go get -u github.com/shurcooL/octicon +``` + +License +------- + +- [MIT License](LICENSE) diff --git a/vendor/github.com/shurcooL/octicon/doc.go b/vendor/github.com/shurcooL/octicon/doc.go new file mode 100644 index 0000000..247aced --- /dev/null +++ b/vendor/github.com/shurcooL/octicon/doc.go @@ -0,0 +1,11 @@ +// Package octicon provides GitHub Octicons. +package octicon + +//go:generate curl -L -o octicons.tgz https://registry.npmjs.org/octicons/-/octicons-8.1.0.tgz +//go:generate tar -xf octicons.tgz package/build/data.json +//go:generate rm octicons.tgz +//go:generate mv package/build/data.json _data/data.json +//go:generate rmdir -p package/build +//go:generate go run generate.go -o octicon.go +//go:generate unconvert -apply +//go:generate gofmt -w -s octicon.go diff --git a/vendor/github.com/shurcooL/octicon/octicon.go b/vendor/github.com/shurcooL/octicon/octicon.go new file mode 100644 index 0000000..b55b1e4 --- /dev/null +++ b/vendor/github.com/shurcooL/octicon/octicon.go @@ -0,0 +1,10882 @@ +package octicon + +import ( + "strconv" + + "golang.org/x/net/html" + "golang.org/x/net/html/atom" +) + +// Icon returns the named Octicon SVG node. +// It returns nil if name is not a valid Octicon symbol name. 
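+//
+// For example, Icon("alert") returns the same node as Alert(), and
+// Icon("no-such-icon") returns nil.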
+func Icon(name string) *html.Node { + switch name { + case "alert": + return Alert() + case "archive": + return Archive() + case "arrow-both": + return ArrowBoth() + case "arrow-down": + return ArrowDown() + case "arrow-left": + return ArrowLeft() + case "arrow-right": + return ArrowRight() + case "arrow-small-down": + return ArrowSmallDown() + case "arrow-small-left": + return ArrowSmallLeft() + case "arrow-small-right": + return ArrowSmallRight() + case "arrow-small-up": + return ArrowSmallUp() + case "arrow-up": + return ArrowUp() + case "beaker": + return Beaker() + case "bell": + return Bell() + case "bold": + return Bold() + case "book": + return Book() + case "bookmark": + return Bookmark() + case "briefcase": + return Briefcase() + case "broadcast": + return Broadcast() + case "browser": + return Browser() + case "bug": + return Bug() + case "calendar": + return Calendar() + case "check": + return Check() + case "checklist": + return Checklist() + case "chevron-down": + return ChevronDown() + case "chevron-left": + return ChevronLeft() + case "chevron-right": + return ChevronRight() + case "chevron-up": + return ChevronUp() + case "circle-slash": + return CircleSlash() + case "circuit-board": + return CircuitBoard() + case "clippy": + return Clippy() + case "clock": + return Clock() + case "cloud-download": + return CloudDownload() + case "cloud-upload": + return CloudUpload() + case "code": + return Code() + case "comment": + return Comment() + case "comment-discussion": + return CommentDiscussion() + case "credit-card": + return CreditCard() + case "dash": + return Dash() + case "dashboard": + return Dashboard() + case "database": + return Database() + case "desktop-download": + return DesktopDownload() + case "device-camera": + return DeviceCamera() + case "device-camera-video": + return DeviceCameraVideo() + case "device-desktop": + return DeviceDesktop() + case "device-mobile": + return DeviceMobile() + case "diff": + return Diff() + case "diff-added": + return DiffAdded() + case "diff-ignored": + return DiffIgnored() + case "diff-modified": + return DiffModified() + case "diff-removed": + return DiffRemoved() + case "diff-renamed": + return DiffRenamed() + case "ellipsis": + return Ellipsis() + case "eye": + return Eye() + case "file": + return File() + case "file-binary": + return FileBinary() + case "file-code": + return FileCode() + case "file-directory": + return FileDirectory() + case "file-media": + return FileMedia() + case "file-pdf": + return FilePdf() + case "file-submodule": + return FileSubmodule() + case "file-symlink-directory": + return FileSymlinkDirectory() + case "file-symlink-file": + return FileSymlinkFile() + case "file-zip": + return FileZip() + case "flame": + return Flame() + case "fold": + return Fold() + case "gear": + return Gear() + case "gift": + return Gift() + case "gist": + return Gist() + case "gist-secret": + return GistSecret() + case "git-branch": + return GitBranch() + case "git-commit": + return GitCommit() + case "git-compare": + return GitCompare() + case "git-merge": + return GitMerge() + case "git-pull-request": + return GitPullRequest() + case "globe": + return Globe() + case "grabber": + return Grabber() + case "graph": + return Graph() + case "heart": + return Heart() + case "history": + return History() + case "home": + return Home() + case "horizontal-rule": + return HorizontalRule() + case "hubot": + return Hubot() + case "inbox": + return Inbox() + case "info": + return Info() + case "issue-closed": + return IssueClosed() + 
case "issue-opened": + return IssueOpened() + case "issue-reopened": + return IssueReopened() + case "italic": + return Italic() + case "jersey": + return Jersey() + case "kebab-horizontal": + return KebabHorizontal() + case "kebab-vertical": + return KebabVertical() + case "key": + return Key() + case "keyboard": + return Keyboard() + case "law": + return Law() + case "light-bulb": + return LightBulb() + case "link": + return Link() + case "link-external": + return LinkExternal() + case "list-ordered": + return ListOrdered() + case "list-unordered": + return ListUnordered() + case "location": + return Location() + case "lock": + return Lock() + case "logo-gist": + return LogoGist() + case "logo-github": + return LogoGitHub() + case "mail": + return Mail() + case "mail-read": + return MailRead() + case "mark-github": + return MarkGitHub() + case "markdown": + return Markdown() + case "megaphone": + return Megaphone() + case "mention": + return Mention() + case "milestone": + return Milestone() + case "mirror": + return Mirror() + case "mortar-board": + return MortarBoard() + case "mute": + return Mute() + case "no-newline": + return NoNewline() + case "note": + return Note() + case "octoface": + return Octoface() + case "organization": + return Organization() + case "package": + return Package() + case "paintcan": + return Paintcan() + case "pencil": + return Pencil() + case "person": + return Person() + case "pin": + return Pin() + case "plug": + return Plug() + case "plus": + return Plus() + case "plus-small": + return PlusSmall() + case "primitive-dot": + return PrimitiveDot() + case "primitive-square": + return PrimitiveSquare() + case "project": + return Project() + case "pulse": + return Pulse() + case "question": + return Question() + case "quote": + return Quote() + case "radio-tower": + return RadioTower() + case "reply": + return Reply() + case "repo": + return Repo() + case "repo-clone": + return RepoClone() + case "repo-force-push": + return RepoForcePush() + case "repo-forked": + return RepoForked() + case "repo-pull": + return RepoPull() + case "repo-push": + return RepoPush() + case "report": + return Report() + case "rocket": + return Rocket() + case "rss": + return RSS() + case "ruby": + return Ruby() + case "screen-full": + return ScreenFull() + case "screen-normal": + return ScreenNormal() + case "search": + return Search() + case "server": + return Server() + case "settings": + return Settings() + case "shield": + return Shield() + case "sign-in": + return SignIn() + case "sign-out": + return SignOut() + case "smiley": + return Smiley() + case "squirrel": + return Squirrel() + case "star": + return Star() + case "stop": + return Stop() + case "sync": + return Sync() + case "tag": + return Tag() + case "tasklist": + return Tasklist() + case "telescope": + return Telescope() + case "terminal": + return Terminal() + case "text-size": + return TextSize() + case "three-bars": + return ThreeBars() + case "thumbsdown": + return Thumbsdown() + case "thumbsup": + return Thumbsup() + case "tools": + return Tools() + case "trashcan": + return Trashcan() + case "triangle-down": + return TriangleDown() + case "triangle-left": + return TriangleLeft() + case "triangle-right": + return TriangleRight() + case "triangle-up": + return TriangleUp() + case "unfold": + return Unfold() + case "unmute": + return Unmute() + case "unverified": + return Unverified() + case "verified": + return Verified() + case "versions": + return Versions() + case "watch": + return Watch() + case "x": + return 
X() + case "zap": + return Zap() + default: + return nil + } +} + +// SetSize sets size of icon, and returns a reference to it. +func SetSize(icon *html.Node, size int) *html.Node { + icon.Attr[1].Val = strconv.Itoa(size) + icon.Attr[2].Val = strconv.Itoa(size) + return icon +} + +// Alert returns an "alert" Octicon SVG node. +func Alert() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M8.893 1.5c-.183-.31-.52-.5-.887-.5s-.703.19-.886.5L.138 13.499a.98.98 0 0 0 0 1.001c.193.31.53.501.886.501h13.964c.367 0 .704-.19.877-.5a1.03 1.03 0 0 0 .01-1.002L8.893 1.5zm.133 11.497H6.987v-2.003h2.039v2.003zm0-3.004H6.987V5.987h2.039v4.006z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Archive returns an "archive" Octicon SVG node. +func Archive() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M13 2H1v2h12V2zM0 4a1 1 0 0 0 1 1v9a1 1 0 0 0 1 1h10a1 1 0 0 0 1-1V5a1 1 0 0 0 1-1V2a1 1 0 0 0-1-1H1a1 1 0 0 0-1 1v2zm2 1h10v9H2V5zm2 3h6V7H4v1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// ArrowBoth returns an "arrow-both" Octicon SVG node. 
+func ArrowBoth() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M0 8l6-5v3h8V3l6 5-6 5v-3H6v3L0 8z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 20 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// ArrowDown returns an "arrow-down" Octicon SVG node. +func ArrowDown() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M7 7V3H3v4H0l5 6 5-6H7z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 10 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// ArrowLeft returns an "arrow-left" Octicon SVG node. 
+func ArrowLeft() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M6 3L0 8l6 5v-3h4V6H6V3z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 10 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// ArrowRight returns an "arrow-right" Octicon SVG node. +func ArrowRight() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M10 8L4 3v3H0v4h4v3l6-5z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 10 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// ArrowSmallDown returns an "arrow-small-down" Octicon SVG node. 
+func ArrowSmallDown() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M4 7V5H2v2H0l3 4 3-4H4z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 6 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// ArrowSmallLeft returns an "arrow-small-left" Octicon SVG node. +func ArrowSmallLeft() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M4 7V5L0 8l4 3V9h2V7H4z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 6 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// ArrowSmallRight returns an "arrow-small-right" Octicon SVG node. 
+func ArrowSmallRight() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M6 8L2 5v2H0v2h2v2l4-3z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 6 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// ArrowSmallUp returns an "arrow-small-up" Octicon SVG node. +func ArrowSmallUp() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M3 5L0 9h2v2h2V9h2L3 5z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 6 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// ArrowUp returns an "arrow-up" Octicon SVG node. 
+func ArrowUp() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M5 3L0 9h3v4h4V9h3L5 3z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 10 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Beaker returns an "beaker" Octicon SVG node. +func Beaker() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M14.38 14.59L11 7V3h1V2H3v1h1v4L.63 14.59A1 1 0 0 0 1.54 16h11.94c.72 0 1.2-.75.91-1.41h-.01zM3.75 10L5 7V3h5v4l1.25 3h-7.5zM8 8h1v1H8V8zM7 7H6V6h1v1zm0-3h1v1H7V4zm0-3H6V0h1v1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Bell returns an "bell" Octicon SVG node. 
+func Bell() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M13.99 11.991v1H0v-1l.73-.58c.769-.769.809-2.547 1.189-4.416.77-3.767 4.077-4.996 4.077-4.996 0-.55.45-1 .999-1 .55 0 1 .45 1 1 0 0 3.387 1.229 4.156 4.996.38 1.879.42 3.657 1.19 4.417l.659.58h-.01zM6.995 15.99c1.11 0 1.999-.89 1.999-1.999H4.996c0 1.11.89 1.999 1.999 1.999z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Bold returns an "bold" Octicon SVG node. +func Bold() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M1 2h3.83c2.48 0 4.3.75 4.3 2.95 0 1.14-.63 2.23-1.67 2.61v.06c1.33.3 2.3 1.23 2.3 2.86 0 2.39-1.97 3.52-4.61 3.52H1V2zm3.66 4.95c1.67 0 2.38-.66 2.38-1.69 0-1.17-.78-1.61-2.34-1.61H3.13v3.3h1.53zm.27 5.39c1.77 0 2.75-.64 2.75-1.98 0-1.27-.95-1.81-2.75-1.81h-1.8v3.8h1.8v-.01z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 10 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Book returns an "book" Octicon SVG node. 
+func Book() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M3 5h4v1H3V5zm0 3h4V7H3v1zm0 2h4V9H3v1zm11-5h-4v1h4V5zm0 2h-4v1h4V7zm0 2h-4v1h4V9zm2-6v9c0 .55-.45 1-1 1H9.5l-1 1-1-1H2c-.55 0-1-.45-1-1V3c0-.55.45-1 1-1h5.5l1 1 1-1H15c.55 0 1 .45 1 1zm-8 .5L7.5 3H2v9h6V3.5zm7-.5H9.5l-.5.5V12h6V3z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Bookmark returns an "bookmark" Octicon SVG node. +func Bookmark() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M9 0H1C.27 0 0 .27 0 1v15l5-3.09L10 16V1c0-.73-.27-1-1-1zm-.78 4.25L6.36 5.61l.72 2.16c.06.22-.02.28-.2.17L5 6.6 3.12 7.94c-.19.11-.25.05-.2-.17l.72-2.16-1.86-1.36c-.17-.16-.14-.23.09-.23l2.3-.03.7-2.16h.25l.7 2.16 2.3.03c.23 0 .27.08.09.23h.01z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 10 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Briefcase returns an "briefcase" Octicon SVG node. 
+func Briefcase() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M9 4V3c0-.55-.45-1-1-1H6c-.55 0-1 .45-1 1v1H1c-.55 0-1 .45-1 1v8c0 .55.45 1 1 1h12c.55 0 1-.45 1-1V5c0-.55-.45-1-1-1H9zM6 3h2v1H6V3zm7 6H8v1H6V9H1V5h1v3h10V5h1v4z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Broadcast returns an "broadcast" Octicon SVG node. +func Broadcast() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M9 9H8c.55 0 1-.45 1-1V7c0-.55-.45-1-1-1H7c-.55 0-1 .45-1 1v1c0 .55.45 1 1 1H6c-.55 0-1 .45-1 1v2h1v3c0 .55.45 1 1 1h1c.55 0 1-.45 1-1v-3h1v-2c0-.55-.45-1-1-1zM7 7h1v1H7V7zm2 4H8v4H7v-4H6v-1h3v1zm2.09-3.5c0-1.98-1.61-3.59-3.59-3.59A3.593 3.593 0 0 0 4 8.31v1.98c-.61-.77-1-1.73-1-2.8 0-2.48 2.02-4.5 4.5-4.5S12 5.01 12 7.49c0 1.06-.39 2.03-1 2.8V8.31c.06-.27.09-.53.09-.81zm3.91 0c0 2.88-1.63 5.38-4 6.63v-1.05a6.553 6.553 0 0 0 3.09-5.58A6.59 6.59 0 0 0 7.5.91 6.59 6.59 0 0 0 .91 7.5c0 2.36 1.23 4.42 3.09 5.58v1.05A7.497 7.497 0 0 1 7.5 0C11.64 0 15 3.36 15 7.5z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Browser returns an "browser" Octicon SVG node. 
+func Browser() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M5 3h1v1H5V3zM3 3h1v1H3V3zM1 3h1v1H1V3zm12 10H1V5h12v8zm0-9H7V3h6v1zm1-1c0-.55-.45-1-1-1H1c-.55 0-1 .45-1 1v10c0 .55.45 1 1 1h12c.55 0 1-.45 1-1V3z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Bug returns an "bug" Octicon SVG node. +func Bug() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M11 10h3V9h-3V8l3.17-1.03-.34-.94L11 7V6c0-.55-.45-1-1-1V4c0-.48-.36-.88-.83-.97L10.2 2H12V1H9.8l-2 2h-.59L5.2 1H3v1h1.8l1.03 1.03C5.36 3.12 5 3.51 5 4v1c-.55 0-1 .45-1 1v1l-2.83-.97-.34.94L4 8v1H1v1h3v1L.83 12.03l.34.94L4 12v1c0 .55.45 1 1 1h1l1-1V6h1v7l1 1h1c.55 0 1-.45 1-1v-1l2.83.97.34-.94L11 11v-1zM9 5H6V4h3v1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Calendar returns an "calendar" Octicon SVG node. 
+func Calendar() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M13 2h-1v1.5c0 .28-.22.5-.5.5h-2c-.28 0-.5-.22-.5-.5V2H6v1.5c0 .28-.22.5-.5.5h-2c-.28 0-.5-.22-.5-.5V2H2c-.55 0-1 .45-1 1v11c0 .55.45 1 1 1h11c.55 0 1-.45 1-1V3c0-.55-.45-1-1-1zm0 12H2V5h11v9zM5 3H4V1h1v2zm6 0h-1V1h1v2zM6 7H5V6h1v1zm2 0H7V6h1v1zm2 0H9V6h1v1zm2 0h-1V6h1v1zM4 9H3V8h1v1zm2 0H5V8h1v1zm2 0H7V8h1v1zm2 0H9V8h1v1zm2 0h-1V8h1v1zm-8 2H3v-1h1v1zm2 0H5v-1h1v1zm2 0H7v-1h1v1zm2 0H9v-1h1v1zm2 0h-1v-1h1v1zm-8 2H3v-1h1v1zm2 0H5v-1h1v1zm2 0H7v-1h1v1zm2 0H9v-1h1v1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Check returns an "check" Octicon SVG node. +func Check() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M12 5l-8 8-4-4 1.5-1.5L4 10l6.5-6.5L12 5z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Checklist returns an "checklist" Octicon SVG node. 
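+//
+// Usage sketch (illustrative; golang.org/x/net/html in scope): each icon is a
+// fresh, parentless *html.Node, so it can be attached to a larger tree
+// directly, e.g.
+//
+//	span := &html.Node{Type: html.ElementNode, Data: "span"}
+//	span.AppendChild(Check()) // safe: Check() allocates a new node each call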
+func Checklist() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M16 8.5l-6 6-3-3L8.5 10l1.5 1.5L14.5 7 16 8.5zM5.7 12.2l.8.8H2c-.55 0-1-.45-1-1V3c0-.55.45-1 1-1h7c.55 0 1 .45 1 1v6.5l-.8-.8c-.39-.39-1.03-.39-1.42 0L5.7 10.8a.996.996 0 0 0 0 1.41v-.01zM4 4h5V3H4v1zm0 2h5V5H4v1zm0 2h3V7H4v1zM3 9H2v1h1V9zm0-2H2v1h1V7zm0-2H2v1h1V5zm0-2H2v1h1V3z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// ChevronDown returns an "chevron-down" Octicon SVG node. +func ChevronDown() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M5 11L0 6l1.5-1.5L5 8.25 8.5 4.5 10 6l-5 5z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 10 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// ChevronLeft returns an "chevron-left" Octicon SVG node. 
+func ChevronLeft() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M5.5 3L7 4.5 3.25 8 7 11.5 5.5 13l-5-5 5-5z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 8 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// ChevronRight returns an "chevron-right" Octicon SVG node. +func ChevronRight() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M7.5 8l-5 5L1 11.5 4.75 8 1 4.5 2.5 3l5 5z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 8 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// ChevronUp returns an "chevron-up" Octicon SVG node. 
+func ChevronUp() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M10 10l-1.5 1.5L5 7.75 1.5 11.5 0 10l5-5 5 5z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 10 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// CircleSlash returns an "circle-slash" Octicon SVG node. +func CircleSlash() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M7 1C3.14 1 0 4.14 0 8s3.14 7 7 7 7-3.14 7-7-3.14-7-7-7zm0 1.3c1.3 0 2.5.44 3.47 1.17l-8 8A5.755 5.755 0 0 1 1.3 8c0-3.14 2.56-5.7 5.7-5.7zm0 11.41c-1.3 0-2.5-.44-3.47-1.17l8-8c.73.97 1.17 2.17 1.17 3.47 0 3.14-2.56 5.7-5.7 5.7z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// CircuitBoard returns an "circuit-board" Octicon SVG node. 
+func CircuitBoard() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M3 5c0-.55.45-1 1-1s1 .45 1 1-.45 1-1 1-1-.45-1-1zm8 0c0-.55-.45-1-1-1s-1 .45-1 1 .45 1 1 1 1-.45 1-1zm0 6c0-.55-.45-1-1-1s-1 .45-1 1 .45 1 1 1 1-.45 1-1zm2-10H5v2.17c.36.19.64.47.83.83h2.34c.42-.78 1.33-1.28 2.34-1.05.75.19 1.36.8 1.53 1.55.31 1.38-.72 2.59-2.05 2.59-.8 0-1.48-.44-1.83-1.09H5.83c-.42.8-1.33 1.28-2.34 1.03-.73-.17-1.34-.78-1.52-1.52C1.72 4.49 2.2 3.59 3 3.17V1H1c-.55 0-1 .45-1 1v12c0 .55.45 1 1 1l5-5h2.17c.42-.78 1.33-1.28 2.34-1.05.75.19 1.36.8 1.53 1.55.31 1.38-.72 2.59-2.05 2.59-.8 0-1.48-.44-1.83-1.09H6.99L4 15h9c.55 0 1-.45 1-1V2c0-.55-.45-1-1-1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Clippy returns an "clippy" Octicon SVG node. +func Clippy() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M2 13h4v1H2v-1zm5-6H2v1h5V7zm2 3V8l-3 3 3 3v-2h5v-2H9zM4.5 9H2v1h2.5V9zM2 12h2.5v-1H2v1zm9 1h1v2c-.02.28-.11.52-.3.7-.19.18-.42.28-.7.3H1c-.55 0-1-.45-1-1V4c0-.55.45-1 1-1h3c0-1.11.89-2 2-2 1.11 0 2 .89 2 2h3c.55 0 1 .45 1 1v5h-1V6H1v9h10v-2zM2 5h8c0-.55-.45-1-1-1H8c-.55 0-1-.45-1-1s-.45-1-1-1-1 .45-1 1-.45 1-1 1H3c-.55 0-1 .45-1 1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Clock returns an "clock" Octicon SVG node. 
+func Clock() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M8 8h3v2H7c-.55 0-1-.45-1-1V4h2v4zM7 2.3c3.14 0 5.7 2.56 5.7 5.7s-2.56 5.7-5.7 5.7A5.71 5.71 0 0 1 1.3 8c0-3.14 2.56-5.7 5.7-5.7zM7 1C3.14 1 0 4.14 0 8s3.14 7 7 7 7-3.14 7-7-3.14-7-7-7z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// CloudDownload returns an "cloud-download" Octicon SVG node. +func CloudDownload() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M9 12h2l-3 3-3-3h2V7h2v5zm3-8c0-.44-.91-3-4.5-3C5.08 1 3 2.92 3 5 1.02 5 0 6.52 0 8c0 1.53 1 3 3 3h3V9.7H3C1.38 9.7 1.3 8.28 1.3 8c0-.17.05-1.7 1.7-1.7h1.3V5c0-1.39 1.56-2.7 3.2-2.7 2.55 0 3.13 1.55 3.2 1.8v1.2H12c.81 0 2.7.22 2.7 2.2 0 2.09-2.25 2.2-2.7 2.2h-2V11h2c2.08 0 4-1.16 4-3.5C16 5.06 14.08 4 12 4z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// CloudUpload returns an "cloud-upload" Octicon SVG node. 
+func CloudUpload() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M7 9H5l3-3 3 3H9v5H7V9zm5-4c0-.44-.91-3-4.5-3C5.08 2 3 3.92 3 6 1.02 6 0 7.52 0 9c0 1.53 1 3 3 3h3v-1.3H3c-1.62 0-1.7-1.42-1.7-1.7 0-.17.05-1.7 1.7-1.7h1.3V6c0-1.39 1.56-2.7 3.2-2.7 2.55 0 3.13 1.55 3.2 1.8v1.2H12c.81 0 2.7.22 2.7 2.2 0 2.09-2.25 2.2-2.7 2.2h-2V12h2c2.08 0 4-1.16 4-3.5C16 6.06 14.08 5 12 5z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Code returns an "code" Octicon SVG node. +func Code() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M9.5 3L8 4.5 11.5 8 8 11.5 9.5 13 14 8 9.5 3zm-5 0L0 8l4.5 5L6 11.5 2.5 8 6 4.5 4.5 3z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Comment returns an "comment" Octicon SVG node. 
+func Comment() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M14 1H2c-.55 0-1 .45-1 1v8c0 .55.45 1 1 1h2v3.5L7.5 11H14c.55 0 1-.45 1-1V2c0-.55-.45-1-1-1zm0 9H7l-2 2v-2H2V2h12v8z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// CommentDiscussion returns an "comment-discussion" Octicon SVG node. +func CommentDiscussion() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M15 1H6c-.55 0-1 .45-1 1v2H1c-.55 0-1 .45-1 1v6c0 .55.45 1 1 1h1v3l3-3h4c.55 0 1-.45 1-1V9h1l3 3V9h1c.55 0 1-.45 1-1V2c0-.55-.45-1-1-1zM9 11H4.5L3 12.5V11H1V5h4v3c0 .55.45 1 1 1h3v2zm6-3h-2v1.5L11.5 8H6V2h9v6z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// CreditCard returns an "credit-card" Octicon SVG node. 
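+//
+// Sizing sketch (illustrative): every icon hard-codes width="16" height="16"
+// plus "fill: currentColor", so it inherits the surrounding text color; to
+// resize one, mutate the returned node's Attr in place, e.g.
+//
+//	n := Comment()
+//	for i, a := range n.Attr {
+//		if a.Key == "width" || a.Key == "height" {
+//			n.Attr[i].Val = "32" // hypothetical target size
+//		}
+//	}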
+func CreditCard() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M12 9H2V8h10v1zm4-6v9c0 .55-.45 1-1 1H1c-.55 0-1-.45-1-1V3c0-.55.45-1 1-1h14c.55 0 1 .45 1 1zm-1 3H1v6h14V6zm0-3H1v1h14V3zm-9 7H2v1h4v-1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Dash returns an "dash" Octicon SVG node. +func Dash() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M0 7v2h8V7H0z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 8 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Dashboard returns an "dashboard" Octicon SVG node. 
+func Dashboard() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M9 5H8V4h1v1zm4 3h-1v1h1V8zM6 5H5v1h1V5zM5 8H4v1h1V8zm11-5.5l-.5-.5L9 7c-.06-.02-1 0-1 0-.55 0-1 .45-1 1v1c0 .55.45 1 1 1h1c.55 0 1-.45 1-1v-.92l6-5.58zm-1.59 4.09c.19.61.3 1.25.3 1.91 0 3.42-2.78 6.2-6.2 6.2-3.42 0-6.21-2.78-6.21-6.2 0-3.42 2.78-6.2 6.2-6.2 1.2 0 2.31.34 3.27.94l.94-.94A7.459 7.459 0 0 0 8.51 1C4.36 1 1 4.36 1 8.5 1 12.64 4.36 16 8.5 16c4.14 0 7.5-3.36 7.5-7.5 0-1.03-.2-2.02-.59-2.91l-1 1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Database returns an "database" Octicon SVG node. +func Database() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M6 15c-3.31 0-6-.9-6-2v-2c0-.17.09-.34.21-.5.67.86 3 1.5 5.79 1.5s5.12-.64 5.79-1.5c.13.16.21.33.21.5v2c0 1.1-2.69 2-6 2zm0-4c-3.31 0-6-.9-6-2V7c0-.11.04-.21.09-.31.03-.06.07-.13.12-.19C.88 7.36 3.21 8 6 8s5.12-.64 5.79-1.5c.05.06.09.13.12.19.05.1.09.21.09.31v2c0 1.1-2.69 2-6 2zm0-4c-3.31 0-6-.9-6-2V3c0-1.1 2.69-2 6-2s6 .9 6 2v2c0 1.1-2.69 2-6 2zm0-5c-2.21 0-4 .45-4 1s1.79 1 4 1 4-.45 4-1-1.79-1-4-1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// DesktopDownload returns an "desktop-download" Octicon SVG node. 
+func DesktopDownload() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M4 6h3V0h2v6h3l-4 4-4-4zm11-4h-4v1h4v8H1V3h4V2H1c-.55 0-1 .45-1 1v9c0 .55.45 1 1 1h5.34c-.25.61-.86 1.39-2.34 2h8c-1.48-.61-2.09-1.39-2.34-2H15c.55 0 1-.45 1-1V3c0-.55-.45-1-1-1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// DeviceCamera returns an "device-camera" Octicon SVG node. +func DeviceCamera() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M15 3H7c0-.55-.45-1-1-1H2c-.55 0-1 .45-1 1-.55 0-1 .45-1 1v9c0 .55.45 1 1 1h14c.55 0 1-.45 1-1V4c0-.55-.45-1-1-1zM6 5H2V4h4v1zm4.5 7C8.56 12 7 10.44 7 8.5S8.56 5 10.5 5 14 6.56 14 8.5 12.44 12 10.5 12zM13 8.5c0 1.38-1.13 2.5-2.5 2.5S8 9.87 8 8.5 9.13 6 10.5 6 13 7.13 13 8.5z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// DeviceCameraVideo returns an "device-camera-video" Octicon SVG node. 
+func DeviceCameraVideo() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M15.2 2.09L10 5.72V3c0-.55-.45-1-1-1H1c-.55 0-1 .45-1 1v9c0 .55.45 1 1 1h8c.55 0 1-.45 1-1V9.28l5.2 3.63c.33.23.8 0 .8-.41v-10c0-.41-.47-.64-.8-.41z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// DeviceDesktop returns an "device-desktop" Octicon SVG node. +func DeviceDesktop() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M15 2H1c-.55 0-1 .45-1 1v9c0 .55.45 1 1 1h5.34c-.25.61-.86 1.39-2.34 2h8c-1.48-.61-2.09-1.39-2.34-2H15c.55 0 1-.45 1-1V3c0-.55-.45-1-1-1zm0 9H1V3h14v8z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// DeviceMobile returns an "device-mobile" Octicon SVG node. 
+func DeviceMobile() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M9 0H1C.45 0 0 .45 0 1v14c0 .55.45 1 1 1h8c.55 0 1-.45 1-1V1c0-.55-.45-1-1-1zM5 15.3c-.72 0-1.3-.58-1.3-1.3 0-.72.58-1.3 1.3-1.3.72 0 1.3.58 1.3 1.3 0 .72-.58 1.3-1.3 1.3zM9 12H1V2h8v10z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 10 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Diff returns an "diff" Octicon SVG node. +func Diff() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M6 7h2v1H6v2H5V8H3V7h2V5h1v2zm-3 6h5v-1H3v1zM7.5 2L11 5.5V15c0 .55-.45 1-1 1H1c-.55 0-1-.45-1-1V3c0-.55.45-1 1-1h6.5zM10 6L7 3H1v12h9V6zM8.5 0H3v1h5l4 4v8h1V4.5L8.5 0z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 13 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// DiffAdded returns an "diff-added" Octicon SVG node. 
+func DiffAdded() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M13 1H1c-.55 0-1 .45-1 1v12c0 .55.45 1 1 1h12c.55 0 1-.45 1-1V2c0-.55-.45-1-1-1zm0 13H1V2h12v12zM6 9H3V7h3V4h2v3h3v2H8v3H6V9z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// DiffIgnored returns an "diff-ignored" Octicon SVG node. +func DiffIgnored() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M13 1H1c-.55 0-1 .45-1 1v12c0 .55.45 1 1 1h12c.55 0 1-.45 1-1V2c0-.55-.45-1-1-1zm0 13H1V2h12v12zm-8.5-2H3v-1.5L9.5 4H11v1.5L4.5 12z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// DiffModified returns an "diff-modified" Octicon SVG node. 
+func DiffModified() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M13 1H1c-.55 0-1 .45-1 1v12c0 .55.45 1 1 1h12c.55 0 1-.45 1-1V2c0-.55-.45-1-1-1zm0 13H1V2h12v12zM4 8c0-1.66 1.34-3 3-3s3 1.34 3 3-1.34 3-3 3-3-1.34-3-3z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// DiffRemoved returns an "diff-removed" Octicon SVG node. +func DiffRemoved() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M13 1H1c-.55 0-1 .45-1 1v12c0 .55.45 1 1 1h12c.55 0 1-.45 1-1V2c0-.55-.45-1-1-1zm0 13H1V2h12v12zm-2-5H3V7h8v2z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// DiffRenamed returns an "diff-renamed" Octicon SVG node. 
+func DiffRenamed() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M6 9H3V7h3V4l5 4-5 4V9zm8-7v12c0 .55-.45 1-1 1H1c-.55 0-1-.45-1-1V2c0-.55.45-1 1-1h12c.55 0 1 .45 1 1zm-1 0H1v12h12V2z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Ellipsis returns an "ellipsis" Octicon SVG node. +func Ellipsis() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M11 5H1c-.55 0-1 .45-1 1v4c0 .55.45 1 1 1h10c.55 0 1-.45 1-1V6c0-.55-.45-1-1-1zM4 9H2V7h2v2zm3 0H5V7h2v2zm3 0H8V7h2v2z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Eye returns an "eye" Octicon SVG node. 
+func Eye() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M8.06 2C3 2 0 8 0 8s3 6 8.06 6C13 14 16 8 16 8s-3-6-7.94-6zM8 12c-2.2 0-4-1.78-4-4 0-2.2 1.8-4 4-4 2.22 0 4 1.8 4 4 0 2.22-1.78 4-4 4zm2-4c0 1.11-.89 2-2 2-1.11 0-2-.89-2-2 0-1.11.89-2 2-2 1.11 0 2 .89 2 2z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// File returns an "file" Octicon SVG node. +func File() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M6 5H2V4h4v1zM2 8h7V7H2v1zm0 2h7V9H2v1zm0 2h7v-1H2v1zm10-7.5V14c0 .55-.45 1-1 1H1c-.55 0-1-.45-1-1V2c0-.55.45-1 1-1h7.5L12 4.5zM11 5L8 2H1v12h10V5z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// FileBinary returns an "file-binary" Octicon SVG node. 
+func FileBinary() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M4 12h1v1H2v-1h1v-2H2V9h2v3zm8-7.5V14c0 .55-.45 1-1 1H1c-.55 0-1-.45-1-1V2c0-.55.45-1 1-1h7.5L12 4.5zM11 5L8 2H1v12h10V5zM8 4H6v1h1v2H6v1h3V7H8V4zM2 4h3v4H2V4zm1 3h1V5H3v2zm3 2h3v4H6V9zm1 3h1v-2H7v2z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// FileCode returns an "file-code" Octicon SVG node. +func FileCode() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M8.5 1H1c-.55 0-1 .45-1 1v12c0 .55.45 1 1 1h10c.55 0 1-.45 1-1V4.5L8.5 1zM11 14H1V2h7l3 3v9zM5 6.98L3.5 8.5 5 10l-.5 1L2 8.5 4.5 6l.5.98zM7.5 6L10 8.5 7.5 11l-.5-.98L8.5 8.5 7 7l.5-1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// FileDirectory returns an "file-directory" Octicon SVG node. 
+func FileDirectory() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M13 4H7V3c0-.66-.31-1-1-1H1c-.55 0-1 .45-1 1v10c0 .55.45 1 1 1h12c.55 0 1-.45 1-1V5c0-.55-.45-1-1-1zM6 4H1V3h5v1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// FileMedia returns an "file-media" Octicon SVG node. +func FileMedia() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M6 5h2v2H6V5zm6-.5V14c0 .55-.45 1-1 1H1c-.55 0-1-.45-1-1V2c0-.55.45-1 1-1h7.5L12 4.5zM11 5L8 2H1v11l3-5 2 4 2-2 3 3V5z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// FilePdf returns an "file-pdf" Octicon SVG node. 
+func FilePdf() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M8.5 1H1a1 1 0 0 0-1 1v12a1 1 0 0 0 1 1h10a1 1 0 0 0 1-1V4.5L8.5 1zM1 2h4a.68.68 0 0 0-.31.2 1.08 1.08 0 0 0-.23.47 4.22 4.22 0 0 0-.09 1.47c.06.609.173 1.211.34 1.8A21.78 21.78 0 0 1 3.6 8.6c-.5 1-.8 1.66-.91 1.84a7.156 7.156 0 0 0-.69.3c-.362.165-.699.38-1 .64V2zm4.42 4.8a5.65 5.65 0 0 0 1.17 2.09c.275.237.595.417.94.53-.64.09-1.23.2-1.81.33-.618.15-1.223.347-1.81.59s.22-.44.61-1.25c.365-.74.67-1.51.91-2.3l-.01.01zM11 14H1.5a.743.743 0 0 1-.17 0 2.12 2.12 0 0 0 .73-.44 10.14 10.14 0 0 0 1.78-2.38c.31-.13.58-.23.81-.31l.42-.14c.45-.13.94-.23 1.44-.33s1-.16 1.48-.2c.447.216.912.394 1.39.53.403.11.814.188 1.23.23h.38V14H11zm0-4.86a3.743 3.743 0 0 0-.64-.28 4.221 4.221 0 0 0-.75-.11c-.411.003-.822.03-1.23.08a3 3 0 0 1-1-.64 6.07 6.07 0 0 1-1.29-2.33c.111-.661.178-1.33.2-2 .02-.25.02-.5 0-.75a1.05 1.05 0 0 0-.2-.88.82.82 0 0 0-.61-.23H8l3 3v4.14z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// FileSubmodule returns an "file-submodule" Octicon SVG node. 
+func FileSubmodule() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M10 7H4v7h9c.55 0 1-.45 1-1V8h-4V7zM9 9H5V8h4v1zm4-5H7V3c0-.66-.31-1-1-1H1c-.55 0-1 .45-1 1v10c0 .55.45 1 1 1h2V7c0-.55.45-1 1-1h6c.55 0 1 .45 1 1h3V5c0-.55-.45-1-1-1zM6 4H1V3h5v1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// FileSymlinkDirectory returns an "file-symlink-directory" Octicon SVG node. +func FileSymlinkDirectory() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M13 4H7V3c0-.66-.31-1-1-1H1c-.55 0-1 .45-1 1v10c0 .55.45 1 1 1h12c.55 0 1-.45 1-1V5c0-.55-.45-1-1-1zM1 3h5v1H1V3zm6 9v-2c-.98-.02-1.84.22-2.55.7-.71.48-1.19 1.25-1.45 2.3.02-1.64.39-2.88 1.13-3.73C4.86 8.43 5.82 8 7.01 8V6l4 3-4 3H7z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// FileSymlinkFile returns an "file-symlink-file" Octicon SVG node. 
+func FileSymlinkFile() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M8.5 1H1c-.55 0-1 .45-1 1v12c0 .55.45 1 1 1h10c.55 0 1-.45 1-1V4.5L8.5 1zM11 14H1V2h7l3 3v9zM6 4.5l4 3-4 3v-2c-.98-.02-1.84.22-2.55.7-.71.48-1.19 1.25-1.45 2.3.02-1.64.39-2.88 1.13-3.73.73-.84 1.69-1.27 2.88-1.27v-2H6z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// FileZip returns an "file-zip" Octicon SVG node. +func FileZip() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M8.5 1H1a1 1 0 0 0-1 1v12a1 1 0 0 0 1 1h10a1 1 0 0 0 1-1V4.5L8.5 1zM11 14H1V2h3v1h1V2h3l3 3v9zM5 4V3h1v1H5zM4 4h1v1H4V4zm1 2V5h1v1H5zM4 6h1v1H4V6zm1 2V7h1v1H5zM4 9.28A2 2 0 0 0 3 11v1h4v-1a2 2 0 0 0-2-2V8H4v1.28zM6 10v1H4v-1h2z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Flame returns an "flame" Octicon SVG node. 
+func Flame() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M5.05.31c.81 2.17.41 3.38-.52 4.31C3.55 5.67 1.98 6.45.9 7.98c-1.45 2.05-1.7 6.53 3.53 7.7-2.2-1.16-2.67-4.52-.3-6.61-.61 2.03.53 3.33 1.94 2.86 1.39-.47 2.3.53 2.27 1.67-.02.78-.31 1.44-1.13 1.81 3.42-.59 4.78-3.42 4.78-5.56 0-2.84-2.53-3.22-1.25-5.61-1.52.13-2.03 1.13-1.89 2.75.09 1.08-1.02 1.8-1.86 1.33-.67-.41-.66-1.19-.06-1.78C8.18 5.31 8.68 2.45 5.05.32L5.03.3l.02.01z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Fold returns an "fold" Octicon SVG node. +func Fold() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M7 9l3 3H8v3H6v-3H4l3-3zm3-6H8V0H6v3H4l3 3 3-3zm4 2c0-.55-.45-1-1-1h-2.5l-1 1h3l-2 2h-7l-2-2h3l-1-1H1c-.55 0-1 .45-1 1l2.5 2.5L0 10c0 .55.45 1 1 1h2.5l1-1h-3l2-2h7l2 2h-3l1 1H13c.55 0 1-.45 1-1l-2.5-2.5L14 5z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Gear returns an "gear" Octicon SVG node. 
+func Gear() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M14 8.77v-1.6l-1.94-.64-.45-1.09.88-1.84-1.13-1.13-1.81.91-1.09-.45-.69-1.92h-1.6l-.63 1.94-1.11.45-1.84-.88-1.13 1.13.91 1.81-.45 1.09L0 7.23v1.59l1.94.64.45 1.09-.88 1.84 1.13 1.13 1.81-.91 1.09.45.69 1.92h1.59l.63-1.94 1.11-.45 1.84.88 1.13-1.13-.92-1.81.47-1.09L14 8.75v.02zM7 11c-1.66 0-3-1.34-3-3s1.34-3 3-3 3 1.34 3 3-1.34 3-3 3z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Gift returns an "gift" Octicon SVG node. +func Gift() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M13 4h-1.38c.19-.33.33-.67.36-.91.06-.67-.11-1.22-.52-1.61C11.1 1.1 10.65 1 10.1 1h-.11c-.53.02-1.11.25-1.53.58-.42.33-.73.72-.97 1.2-.23-.48-.55-.88-.97-1.2-.42-.32-1-.58-1.53-.58h-.03c-.56 0-1.06.09-1.44.48-.41.39-.58.94-.52 1.61.03.23.17.58.36.91H1.98c-.55 0-1 .45-1 1v3h1v5c0 .55.45 1 1 1h9c.55 0 1-.45 1-1V8h1V5c0-.55-.45-1-1-1H13zm-4.78-.88c.17-.36.42-.67.75-.92.3-.23.72-.39 1.05-.41h.09c.45 0 .66.11.8.25s.33.39.3.95c-.05.19-.25.61-.5 1h-2.9l.41-.88v.01zM4.09 2.04c.13-.13.31-.25.91-.25.31 0 .72.17 1.03.41.33.25.58.55.75.92L7.2 4H4.3c-.25-.39-.45-.81-.5-1-.03-.56.16-.81.3-.95l-.01-.01zM7 12.99H3V8h4v5-.01zm0-6H2V5h5v2-.01zm5 6H8V8h4v5-.01zm1-6H8V5h5v2-.01z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Gist returns an "gist" Octicon SVG node.
+func Gist() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M7.5 5L10 7.5 7.5 10l-.75-.75L8.5 7.5 6.75 5.75 7.5 5zm-3 0L2 7.5 4.5 10l.75-.75L3.5 7.5l1.75-1.75L4.5 5zM0 13V2c0-.55.45-1 1-1h10c.55 0 1 .45 1 1v11c0 .55-.45 1-1 1H1c-.55 0-1-.45-1-1zm1 0h10V2H1v11z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// GistSecret returns an "gist-secret" Octicon SVG node. +func GistSecret() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M8 10.5L9 14H5l1-3.5L5.25 9h3.5L8 10.5zM10 6H4L2 7h10l-2-1zM9 2L7 3 5 2 4 5h6L9 2zm4.03 7.75L10 9l1 2-2 3h3.22c.45 0 .86-.31.97-.75l.56-2.28c.14-.53-.19-1.08-.72-1.22zM4 9l-3.03.75c-.53.14-.86.69-.72 1.22l.56 2.28c.11.44.52.75.97.75H5l-2-3 1-2z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// GitBranch returns an "git-branch" Octicon SVG node.
+func GitBranch() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M10 5c0-1.11-.89-2-2-2a1.993 1.993 0 0 0-1 3.72v.3c-.02.52-.23.98-.63 1.38-.4.4-.86.61-1.38.63-.83.02-1.48.16-2 .45V4.72a1.993 1.993 0 0 0-1-3.72C.88 1 0 1.89 0 3a2 2 0 0 0 1 1.72v6.56c-.59.35-1 .99-1 1.72 0 1.11.89 2 2 2 1.11 0 2-.89 2-2 0-.53-.2-1-.53-1.36.09-.06.48-.41.59-.47.25-.11.56-.17.94-.17 1.05-.05 1.95-.45 2.75-1.25S8.95 7.77 9 6.73h-.02C9.59 6.37 10 5.73 10 5zM2 1.8c.66 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2C1.35 4.2.8 3.65.8 3c0-.65.55-1.2 1.2-1.2zm0 12.41c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2zm6-8c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 10 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// GitCommit returns an "git-commit" Octicon SVG node. +func GitCommit() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M10.86 7c-.45-1.72-2-3-3.86-3-1.86 0-3.41 1.28-3.86 3H0v2h3.14c.45 1.72 2 3 3.86 3 1.86 0 3.41-1.28 3.86-3H14V7h-3.14zM7 10.2c-1.22 0-2.2-.98-2.2-2.2 0-1.22.98-2.2 2.2-2.2 1.22 0 2.2.98 2.2 2.2 0 1.22-.98 2.2-2.2 2.2z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// GitCompare returns an "git-compare" Octicon SVG node. 
+func GitCompare() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M5 12H4c-.27-.02-.48-.11-.69-.31-.21-.2-.3-.42-.31-.69V4.72A1.993 1.993 0 0 0 2 1a1.993 1.993 0 0 0-1 3.72V11c.03.78.34 1.47.94 2.06.6.59 1.28.91 2.06.94h1v2l3-3-3-3v2zM2 1.8c.66 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2C1.35 4.2.8 3.65.8 3c0-.65.55-1.2 1.2-1.2zm11 9.48V5c-.03-.78-.34-1.47-.94-2.06-.6-.59-1.28-.91-2.06-.94H9V0L6 3l3 3V4h1c.27.02.48.11.69.31.21.2.3.42.31.69v6.28A1.993 1.993 0 0 0 12 15a1.993 1.993 0 0 0 1-3.72zm-1 2.92c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// GitMerge returns an "git-merge" Octicon SVG node. 
+func GitMerge() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M10 7c-.73 0-1.38.41-1.73 1.02V8C7.22 7.98 6 7.64 5.14 6.98c-.75-.58-1.5-1.61-1.89-2.44A1.993 1.993 0 0 0 2 .99C.89.99 0 1.89 0 3a2 2 0 0 0 1 1.72v6.56c-.59.35-1 .99-1 1.72 0 1.11.89 2 2 2a1.993 1.993 0 0 0 1-3.72V7.67c.67.7 1.44 1.27 2.3 1.69.86.42 2.03.63 2.97.64v-.02c.36.61 1 1.02 1.73 1.02 1.11 0 2-.89 2-2 0-1.11-.89-2-2-2zm-6.8 6c0 .66-.55 1.2-1.2 1.2-.65 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2zM2 4.2C1.34 4.2.8 3.65.8 3c0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2zm8 6c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// GitPullRequest returns an "git-pull-request" Octicon SVG node. 
+func GitPullRequest() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M11 11.28V5c-.03-.78-.34-1.47-.94-2.06C9.46 2.35 8.78 2.03 8 2H7V0L4 3l3 3V4h1c.27.02.48.11.69.31.21.2.3.42.31.69v6.28A1.993 1.993 0 0 0 10 15a1.993 1.993 0 0 0 1-3.72zm-1 2.92c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2zM4 3c0-1.11-.89-2-2-2a1.993 1.993 0 0 0-1 3.72v6.56A1.993 1.993 0 0 0 2 15a1.993 1.993 0 0 0 1-3.72V4.72c.59-.34 1-.98 1-1.72zm-.8 10c0 .66-.55 1.2-1.2 1.2-.65 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2zM2 4.2C1.34 4.2.8 3.65.8 3c0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Globe returns an "globe" Octicon SVG node. 
+func Globe() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M7 1C3.14 1 0 4.14 0 8s3.14 7 7 7c.48 0 .94-.05 1.38-.14-.17-.08-.2-.73-.02-1.09.19-.41.81-1.45.2-1.8-.61-.35-.44-.5-.81-.91-.37-.41-.22-.47-.25-.58-.08-.34.36-.89.39-.94.02-.06.02-.27 0-.33 0-.08-.27-.22-.34-.23-.06 0-.11.11-.2.13-.09.02-.5-.25-.59-.33-.09-.08-.14-.23-.27-.34-.13-.13-.14-.03-.33-.11s-.8-.31-1.28-.48c-.48-.19-.52-.47-.52-.66-.02-.2-.3-.47-.42-.67-.14-.2-.16-.47-.2-.41-.04.06.25.78.2.81-.05.02-.16-.2-.3-.38-.14-.19.14-.09-.3-.95s.14-1.3.17-1.75c.03-.45.38.17.19-.13-.19-.3 0-.89-.14-1.11-.13-.22-.88.25-.88.25.02-.22.69-.58 1.16-.92.47-.34.78-.06 1.16.05.39.13.41.09.28-.05-.13-.13.06-.17.36-.13.28.05.38.41.83.36.47-.03.05.09.11.22s-.06.11-.38.3c-.3.2.02.22.55.61s.38-.25.31-.55c-.07-.3.39-.06.39-.06.33.22.27.02.5.08.23.06.91.64.91.64-.83.44-.31.48-.17.59.14.11-.28.3-.28.3-.17-.17-.19.02-.3.08-.11.06-.02.22-.02.22-.56.09-.44.69-.42.83 0 .14-.38.36-.47.58-.09.2.25.64.06.66-.19.03-.34-.66-1.31-.41-.3.08-.94.41-.59 1.08.36.69.92-.19 1.11-.09.19.1-.06.53-.02.55.04.02.53.02.56.61.03.59.77.53.92.55.17 0 .7-.44.77-.45.06-.03.38-.28 1.03.09.66.36.98.31 1.2.47.22.16.08.47.28.58.2.11 1.06-.03 1.28.31.22.34-.88 2.09-1.22 2.28-.34.19-.48.64-.84.92s-.81.64-1.27.91c-.41.23-.47.66-.66.8 3.14-.7 5.48-3.5 5.48-6.84 0-3.86-3.14-7-7-7L7 1zm1.64 6.56c-.09.03-.28.22-.78-.08-.48-.3-.81-.23-.86-.28 0 0-.05-.11.17-.14.44-.05.98.41 1.11.41.13 0 .19-.13.41-.05.22.08.05.13-.05.14zM6.34 1.7c-.05-.03.03-.08.09-.14.03-.03.02-.11.05-.14.11-.11.61-.25.52.03-.11.27-.58.3-.66.25zm1.23.89c-.19-.02-.58-.05-.52-.14.3-.28-.09-.38-.34-.38-.25-.02-.34-.16-.22-.19.12-.03.61.02.7.08.08.06.52.25.55.38.02.13 0 .25-.17.25zm1.47-.05c-.14.09-.83-.41-.95-.52-.56-.48-.89-.31-1-.41-.11-.1-.08-.19.11-.34.19-.15.69.06 1 .09.3.03.66.27.66.55.02.25.33.5.19.63h-.01z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Grabber returns an "grabber" Octicon SVG node. 
+func Grabber() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M8 4v1H0V4h8zM0 8h8V7H0v1zm0 3h8v-1H0v1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 8 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Graph returns an "graph" Octicon SVG node. +func Graph() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M16 14v1H0V0h1v14h15zM5 13H3V8h2v5zm4 0H7V3h2v10zm4 0h-2V6h2v7z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Heart returns an "heart" Octicon SVG node. 
+func Heart() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M9 2c-.97 0-1.69.42-2.2 1-.51.58-.78.92-.8 1-.02-.08-.28-.42-.8-1-.52-.58-1.17-1-2.2-1-1.632.086-2.954 1.333-3 3 0 .52.09 1.52.67 2.67C1.25 8.82 3.01 10.61 6 13c2.98-2.39 4.77-4.17 5.34-5.33C11.91 6.51 12 5.5 12 5c-.047-1.69-1.342-2.913-3-3z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// History returns an "history" Octicon SVG node. +func History() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M8 13H6V6h5v2H8v5zM7 1C4.81 1 2.87 2.02 1.59 3.59L0 2v4h4L2.5 4.5C3.55 3.17 5.17 2.3 7 2.3c3.14 0 5.7 2.56 5.7 5.7s-2.56 5.7-5.7 5.7A5.71 5.71 0 0 1 1.3 8c0-.34.03-.67.09-1H.08C.03 7.33 0 7.66 0 8c0 3.86 3.14 7 7 7s7-3.14 7-7-3.14-7-7-7z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Home returns an "home" Octicon SVG node. 
+func Home() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M16 9l-3-3V2h-2v2L8 1 0 9h2l1 5c0 .55.45 1 1 1h8c.55 0 1-.45 1-1l1-5h2zm-4 5H9v-4H7v4H4L2.81 7.69 8 2.5l5.19 5.19L12 14z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// HorizontalRule returns an "horizontal-rule" Octicon SVG node. +func HorizontalRule() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M1 7h2v2h1V3H3v3H1V3H0v6h1V7zm9 2V7H9v2h1zm0-3V4H9v2h1zM7 6V4h2V3H6v6h1V7h2V6H7zm-7 7h10v-2H0v2z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 10 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Hubot returns an "hubot" Octicon SVG node. 
+func Hubot() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M3 6c-.55 0-1 .45-1 1v2c0 .55.45 1 1 1h8c.55 0 1-.45 1-1V7c0-.55-.45-1-1-1H3zm8 1.75L9.75 9h-1.5L7 7.75 5.75 9h-1.5L3 7.75V7h.75L5 8.25 6.25 7h1.5L9 8.25 10.25 7H11v.75zM5 11h4v1H5v-1zm2-9C3.14 2 0 4.91 0 8.5V13c0 .55.45 1 1 1h12c.55 0 1-.45 1-1V8.5C14 4.91 10.86 2 7 2zm6 11H1V8.5c0-3.09 2.64-5.59 6-5.59s6 2.5 6 5.59V13z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Inbox returns an "inbox" Octicon SVG node. +func Inbox() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M14 9l-1.13-7.14c-.08-.48-.5-.86-1-.86H2.13c-.5 0-.92.38-1 .86L0 9v5c0 .55.45 1 1 1h12c.55 0 1-.45 1-1V9zm-3.28.55l-.44.89c-.17.34-.52.56-.91.56H4.61c-.38 0-.72-.22-.89-.55l-.44-.91c-.17-.33-.52-.55-.89-.55H1l1-7h10l1 7h-1.38c-.39 0-.73.22-.91.55l.01.01z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Info returns an "info" Octicon SVG node. 
+func Info() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M6.3 5.69a.942.942 0 0 1-.28-.7c0-.28.09-.52.28-.7.19-.18.42-.28.7-.28.28 0 .52.09.7.28.18.19.28.42.28.7 0 .28-.09.52-.28.7a1 1 0 0 1-.7.3c-.28 0-.52-.11-.7-.3zM8 7.99c-.02-.25-.11-.48-.31-.69-.2-.19-.42-.3-.69-.31H6c-.27.02-.48.13-.69.31-.2.2-.3.44-.31.69h1v3c.02.27.11.5.31.69.2.2.42.31.69.31h1c.27 0 .48-.11.69-.31.2-.19.3-.42.31-.69H8V7.98v.01zM7 2.3c-3.14 0-5.7 2.54-5.7 5.68 0 3.14 2.56 5.7 5.7 5.7s5.7-2.55 5.7-5.7c0-3.15-2.56-5.69-5.7-5.69v.01zM7 .98c3.86 0 7 3.14 7 7s-3.14 7-7 7-7-3.12-7-7 3.14-7 7-7z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// IssueClosed returns an "issue-closed" Octicon SVG node. +func IssueClosed() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M7 10h2v2H7v-2zm2-6H7v5h2V4zm1.5 1.5l-1 1L12 9l4-4.5-1-1L12 7l-1.5-1.5zM8 13.7A5.71 5.71 0 0 1 2.3 8c0-3.14 2.56-5.7 5.7-5.7 1.83 0 3.45.88 4.5 2.2l.92-.92A6.947 6.947 0 0 0 8 1C4.14 1 1 4.14 1 8s3.14 7 7 7 7-3.14 7-7l-1.52 1.52c-.66 2.41-2.86 4.19-5.48 4.19v-.01z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// IssueOpened returns an "issue-opened" Octicon SVG node. 
+func IssueOpened() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M7 2.3c3.14 0 5.7 2.56 5.7 5.7s-2.56 5.7-5.7 5.7A5.71 5.71 0 0 1 1.3 8c0-3.14 2.56-5.7 5.7-5.7zM7 1C3.14 1 0 4.14 0 8s3.14 7 7 7 7-3.14 7-7-3.14-7-7-7zm1 3H6v5h2V4zm0 6H6v2h2v-2z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// IssueReopened returns an "issue-reopened" Octicon SVG node. +func IssueReopened() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M8 9H6V4h2v5zm-2 3h2v-2H6v2zm6.33-2H10l1.5 1.5c-1.05 1.33-2.67 2.2-4.5 2.2A5.71 5.71 0 0 1 1.3 8c0-.34.03-.67.09-1H.08C.03 7.33 0 7.66 0 8c0 3.86 3.14 7 7 7 2.19 0 4.13-1.02 5.41-2.59L14 14v-4h-1.67zM1.67 6H4L2.5 4.5C3.55 3.17 5.17 2.3 7 2.3c3.14 0 5.7 2.56 5.7 5.7 0 .34-.03.67-.09 1h1.31c.05-.33.08-.66.08-1 0-3.86-3.14-7-7-7-2.19 0-4.13 1.02-5.41 2.59L0 2v4h1.67z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Italic returns an "italic" Octicon SVG node. 
+func Italic() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M2.81 5h1.98L3 14H1l1.81-9zm.36-2.7c0-.7.58-1.3 1.33-1.3.56 0 1.13.38 1.13 1.03 0 .75-.59 1.3-1.33 1.3-.58 0-1.13-.38-1.13-1.03z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 6 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Jersey returns an "jersey" Octicon SVG node. +func Jersey() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M4.5 6l-.5.5v5l.5.5h2l.5-.5v-5L6.5 6h-2zM6 11H5V7h1v4zm6.27-7.25C12.05 2.37 11.96 1.12 12 0H9.02c0 .27-.13.48-.39.69-.25.2-.63.3-1.13.3-.5 0-.88-.09-1.13-.3-.23-.2-.36-.42-.36-.69H3c.05 1.13-.03 2.38-.25 3.75C2.55 5.13 1.95 5.88 1 6v9c.02.27.11.48.31.69.2.21.42.3.69.31h11c.27-.02.48-.11.69-.31.21-.2.3-.42.31-.69V6c-.95-.13-1.53-.88-1.75-2.25h.02zM13 15H2V7c.89-.5 1.48-1.25 1.72-2.25S4.03 2.5 4 1h1c-.02.78.16 1.47.52 2.06.36.58 1.02.89 2 .94.98-.02 1.64-.33 2-.94.36-.59.5-1.28.48-2.06h1c.02 1.42.13 2.55.33 3.38.2.81.69 2 1.67 2.63v8V15zM8.5 6l-.5.5v5l.5.5h2l.5-.5v-5l-.5-.5h-2zm1.5 5H9V7h1v4z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// KebabHorizontal returns an "kebab-horizontal" Octicon SVG node. 
+func KebabHorizontal() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M1.5 9a1.5 1.5 0 1 0 0-3 1.5 1.5 0 0 0 0 3zm5 0a1.5 1.5 0 1 0 0-3 1.5 1.5 0 0 0 0 3zM13 7.5a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 13 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// KebabVertical returns an "kebab-vertical" Octicon SVG node. +func KebabVertical() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M0 2.5a1.5 1.5 0 1 0 3 0 1.5 1.5 0 0 0-3 0zm0 5a1.5 1.5 0 1 0 3 0 1.5 1.5 0 0 0-3 0zM1.5 14a1.5 1.5 0 1 1 0-3 1.5 1.5 0 0 1 0 3z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 3 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Key returns an "key" Octicon SVG node. 
+func Key() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M12.83 2.17C12.08 1.42 11.14 1.03 10 1c-1.13.03-2.08.42-2.83 1.17S6.04 3.86 6.01 5c0 .3.03.59.09.89L0 12v1l1 1h2l1-1v-1h1v-1h1v-1h2l1.09-1.11c.3.08.59.11.91.11 1.14-.03 2.08-.42 2.83-1.17S13.97 6.14 14 5c-.03-1.14-.42-2.08-1.17-2.83zM11 5.38c-.77 0-1.38-.61-1.38-1.38 0-.77.61-1.38 1.38-1.38.77 0 1.38.61 1.38 1.38 0 .77-.61 1.38-1.38 1.38z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Keyboard returns an "keyboard" Octicon SVG node. +func Keyboard() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M10 5H9V4h1v1zM3 6H2v1h1V6zm5-2H7v1h1V4zM4 4H2v1h2V4zm8 7h2v-1h-2v1zM8 7h1V6H8v1zm-4 3H2v1h2v-1zm8-6h-1v1h1V4zm2 0h-1v1h1V4zm-2 5h2V6h-2v3zm4-6v9c0 .55-.45 1-1 1H1c-.55 0-1-.45-1-1V3c0-.55.45-1 1-1h14c.55 0 1 .45 1 1zm-1 0H1v9h14V3zM6 7h1V6H6v1zm0-3H5v1h1V4zM4 7h1V6H4v1zm1 4h6v-1H5v1zm5-4h1V6h-1v1zM3 8H2v1h1V8zm5 0v1h1V8H8zM6 8v1h1V8H6zM5 8H4v1h1V8zm5 1h1V8h-1v1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Law returns an "law" Octicon SVG node. 
+func Law() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M7 4c-.83 0-1.5-.67-1.5-1.5S6.17 1 7 1s1.5.67 1.5 1.5S7.83 4 7 4zm7 6c0 1.11-.89 2-2 2h-1c-1.11 0-2-.89-2-2l2-4h-1c-.55 0-1-.45-1-1H8v8c.42 0 1 .45 1 1h1c.42 0 1 .45 1 1H3c0-.55.58-1 1-1h1c0-.55.58-1 1-1h.03L6 5H5c0 .55-.45 1-1 1H3l2 4c0 1.11-.89 2-2 2H2c-1.11 0-2-.89-2-2l2-4H1V5h3c0-.55.45-1 1-1h4c.55 0 1 .45 1 1h3v1h-1l2 4zM2.5 7L1 10h3L2.5 7zM13 10l-1.5-3-1.5 3h3z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// LightBulb returns an "light-bulb" Octicon SVG node. +func LightBulb() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M6.5 0C3.48 0 1 2.19 1 5c0 .92.55 2.25 1 3 1.34 2.25 1.78 2.78 2 4v1h5v-1c.22-1.22.66-1.75 2-4 .45-.75 1-2.08 1-3 0-2.81-2.48-5-5.5-5zm3.64 7.48c-.25.44-.47.8-.67 1.11-.86 1.41-1.25 2.06-1.45 3.23-.02.05-.02.11-.02.17H5c0-.06 0-.13-.02-.17-.2-1.17-.59-1.83-1.45-3.23-.2-.31-.42-.67-.67-1.11C2.44 6.78 2 5.65 2 5c0-2.2 2.02-4 4.5-4 1.22 0 2.36.42 3.22 1.19C10.55 2.94 11 3.94 11 5c0 .66-.44 1.78-.86 2.48zM4 14h5c-.23 1.14-1.3 2-2.5 2s-2.27-.86-2.5-2z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Link returns an "link" Octicon SVG node. 
+func Link() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M4 9h1v1H4c-1.5 0-3-1.69-3-3.5S2.55 3 4 3h4c1.45 0 3 1.69 3 3.5 0 1.41-.91 2.72-2 3.25V8.59c.58-.45 1-1.27 1-2.09C10 5.22 8.98 4 8 4H4c-.98 0-2 1.22-2 2.5S3 9 4 9zm9-3h-1v1h1c1 0 2 1.22 2 2.5S13.98 12 13 12H9c-.98 0-2-1.22-2-2.5 0-.83.42-1.64 1-2.09V6.25c-1.09.53-2 1.84-2 3.25C6 11.31 7.55 13 9 13h4c1.45 0 3-1.69 3-3.5S14.5 6 13 6z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// LinkExternal returns an "link-external" Octicon SVG node. +func LinkExternal() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M11 10h1v3c0 .55-.45 1-1 1H1c-.55 0-1-.45-1-1V3c0-.55.45-1 1-1h3v1H1v10h10v-3zM6 2l2.25 2.25L5 7.5 6.5 9l3.25-3.25L12 8V2H6z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// ListOrdered returns an "list-ordered" Octicon SVG node. 
+func ListOrdered() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M12 12.99c0 .589 0 .998-.59.998H4.597c-.59 0-.59-.41-.59-.999 0-.59 0-.999.59-.999H11.4c.59 0 .59.41.59 1H12zM4.596 3.996H11.4c.59 0 .59-.41.59-1 0-.589 0-.999-.59-.999H4.596c-.59 0-.59.41-.59 1 0 .589 0 .999.59.999zM11.4 6.994H4.596c-.59 0-.59.41-.59 1 0 .589 0 .999.59.999H11.4c.59 0 .59-.41.59-1 0-.59 0-.999-.59-.999zM2.008 1h-.72C.99 1.19.71 1.25.26 1.34V2h.75v2.138H.17v.859h2.837v-.86h-.999V1zm.25 8.123c-.17 0-.45.03-.66.06.53-.56 1.14-1.249 1.14-1.888-.02-.78-.56-1.299-1.36-1.299-.589 0-.968.2-1.378.64l.58.579c.19-.19.38-.38.639-.38.28 0 .48.16.48.52 0 .53-.77 1.199-1.699 2.058v.58h2.998l-.09-.88h-.66l.01.01zm-.08 3.777v-.03c.44-.19.64-.47.64-.859 0-.7-.56-1.11-1.44-1.11-.479 0-.888.19-1.278.52l.55.64c.25-.2.44-.31.689-.31.27 0 .42.13.42.36 0 .27-.2.44-.86.44v.749c.83 0 .98.17.98.47 0 .25-.23.38-.58.38-.28 0-.56-.14-.81-.38l-.479.659c.3.36.77.56 1.409.56.83 0 1.529-.41 1.529-1.16 0-.5-.31-.809-.77-.939v.01z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// ListUnordered returns an "list-unordered" Octicon SVG node. 
+func ListUnordered() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M2 13c0 .59 0 1-.59 1H.59C0 14 0 13.59 0 13c0-.59 0-1 .59-1h.81c.59 0 .59.41.59 1H2zm2.59-9h6.81c.59 0 .59-.41.59-1 0-.59 0-1-.59-1H4.59C4 2 4 2.41 4 3c0 .59 0 1 .59 1zM1.41 7H.59C0 7 0 7.41 0 8c0 .59 0 1 .59 1h.81c.59 0 .59-.41.59-1 0-.59 0-1-.59-1h.01zm0-5H.59C0 2 0 2.41 0 3c0 .59 0 1 .59 1h.81c.59 0 .59-.41.59-1 0-.59 0-1-.59-1h.01zm10 5H4.59C4 7 4 7.41 4 8c0 .59 0 1 .59 1h6.81c.59 0 .59-.41.59-1 0-.59 0-1-.59-1h.01zm0 5H4.59C4 12 4 12.41 4 13c0 .59 0 1 .59 1h6.81c.59 0 .59-.41.59-1 0-.59 0-1-.59-1h.01z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Location returns an "location" Octicon SVG node. +func Location() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M6 0C2.69 0 0 2.5 0 5.5 0 10.02 6 16 6 16s6-5.98 6-10.5C12 2.5 9.31 0 6 0zm0 14.55C4.14 12.52 1 8.44 1 5.5 1 3.02 3.25 1 6 1c1.34 0 2.61.48 3.56 1.36.92.86 1.44 1.97 1.44 3.14 0 2.94-3.14 7.02-5 9.05zM8 5.5c0 1.11-.89 2-2 2-1.11 0-2-.89-2-2 0-1.11.89-2 2-2 1.11 0 2 .89 2 2z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Lock returns an "lock" Octicon SVG node. 
+func Lock() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M4 13H3v-1h1v1zm8-6v7c0 .55-.45 1-1 1H1c-.55 0-1-.45-1-1V7c0-.55.45-1 1-1h1V4c0-2.2 1.8-4 4-4s4 1.8 4 4v2h1c.55 0 1 .45 1 1zM3.8 6h4.41V4c0-1.22-.98-2.2-2.2-2.2-1.22 0-2.2.98-2.2 2.2v2H3.8zM11 7H2v7h9V7zM4 8H3v1h1V8zm0 2H3v1h1v-1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// LogoGist returns an "logo-gist" Octicon SVG node. +func LogoGist() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M4.7 8.73h2.45v4.02c-.55.27-1.64.34-2.53.34-2.56 0-3.47-2.2-3.47-5.05 0-2.85.91-5.06 3.48-5.06 1.28 0 2.06.23 3.28.73V2.66C7.27 2.33 6.25 2 4.63 2 1.13 2 0 4.69 0 8.03c0 3.34 1.11 6.03 4.63 6.03 1.64 0 2.81-.27 3.59-.64V7.73H4.7v1zm6.39 3.72V6.06h-1.05v6.28c0 1.25.58 1.72 1.72 1.72v-.89c-.48 0-.67-.16-.67-.7v-.02zm.25-8.72c0-.44-.33-.78-.78-.78s-.77.34-.77.78.33.78.77.78.78-.34.78-.78zm4.34 5.69c-1.5-.13-1.78-.48-1.78-1.17 0-.77.33-1.34 1.88-1.34 1.05 0 1.66.16 2.27.36v-.94c-.69-.3-1.52-.39-2.25-.39-2.2 0-2.92 1.2-2.92 2.31 0 1.08.47 1.88 2.73 2.08 1.55.13 1.77.63 1.77 1.34 0 .73-.44 1.42-2.06 1.42-1.11 0-1.86-.19-2.33-.36v.94c.5.2 1.58.39 2.33.39 2.38 0 3.14-1.2 3.14-2.41 0-1.28-.53-2.03-2.75-2.23h-.03zm8.58-2.47v-.86h-2.42v-2.5l-1.08.31v2.11l-1.56.44v.48h1.56v5c0 1.53 1.19 2.13 2.5 2.13.19 0 .52-.02.69-.05v-.89c-.19.03-.41.03-.61.03-.97 0-1.5-.39-1.5-1.34V6.94h2.42v.02-.01z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 25 16"), + }, + { + Namespace: 
(string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// LogoGitHub returns an "logo-github" Octicon SVG node. +func LogoGitHub() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M18.53 12.03h-.02c.009 0 .015.01.024.011h.006l-.01-.01zm.004.011c-.093.001-.327.05-.574.05-.78 0-1.05-.36-1.05-.83V8.13h1.59c.09 0 .16-.08.16-.19v-1.7c0-.09-.08-.17-.16-.17h-1.59V3.96c0-.08-.05-.13-.14-.13h-2.16c-.09 0-.14.05-.14.13v2.17s-1.09.27-1.16.28c-.08.02-.13.09-.13.17v1.36c0 .11.08.19.17.19h1.11v3.28c0 2.44 1.7 2.69 2.86 2.69.53 0 1.17-.17 1.27-.22.06-.02.09-.09.09-.16v-1.5a.177.177 0 0 0-.146-.18zm23.696-2.2c0-1.81-.73-2.05-1.5-1.97-.6.04-1.08.34-1.08.34v3.52s.49.34 1.22.36c1.03.03 1.36-.34 1.36-2.25zm2.43-.16c0 3.43-1.11 4.41-3.05 4.41-1.64 0-2.52-.83-2.52-.83s-.04.46-.09.52c-.03.06-.08.08-.14.08h-1.48c-.1 0-.19-.08-.19-.17l.02-11.11c0-.09.08-.17.17-.17h2.13c.09 0 .17.08.17.17v3.77s.82-.53 2.02-.53l-.01-.02c1.2 0 2.97.45 2.97 3.88zm-8.72-3.61h-2.1c-.11 0-.17.08-.17.19v5.44s-.55.39-1.3.39-.97-.34-.97-1.09V6.25c0-.09-.08-.17-.17-.17h-2.14c-.09 0-.17.08-.17.17v5.11c0 2.2 1.23 2.75 2.92 2.75 1.39 0 2.52-.77 2.52-.77s.05.39.08.45c.02.05.09.09.16.09h1.34c.11 0 .17-.08.17-.17l.02-7.47c0-.09-.08-.17-.19-.17zm-23.7-.01h-2.13c-.09 0-.17.09-.17.2v7.34c0 .2.13.27.3.27h1.92c.2 0 .25-.09.25-.27V6.23c0-.09-.08-.17-.17-.17zm-1.05-3.38c-.77 0-1.38.61-1.38 1.38 0 .77.61 1.38 1.38 1.38.75 0 1.36-.61 1.36-1.38 0-.77-.61-1.38-1.36-1.38zm16.49-.25h-2.11c-.09 0-.17.08-.17.17v4.09h-3.31V2.6c0-.09-.08-.17-.17-.17h-2.13c-.09 0-.17.08-.17.17v11.11c0 .09.09.17.17.17h2.13c.09 0 .17-.08.17-.17V8.96h3.31l-.02 4.75c0 .09.08.17.17.17h2.13c.09 0 .17-.08.17-.17V2.6c0-.09-.08-.17-.17-.17zM8.81 7.35v5.74c0 .04-.01.11-.06.13 0 0-1.25.89-3.31.89-2.49 0-5.44-.78-5.44-5.92S2.58 1.99 5.1 2c2.18 0 3.06.49 3.2.58.04.05.06.09.06.14L7.94 4.5c0 .09-.09.2-.2.17-.36-.11-.9-.33-2.17-.33-1.47 0-3.05.42-3.05 3.73s1.5 3.7 2.58 3.7c.92 0 1.25-.11 1.25-.11v-2.3H4.88c-.11 0-.19-.08-.19-.17V7.35c0-.09.08-.17.19-.17h3.74c.11 0 .19.08.19.17z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 45 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Mail returns an "mail" Octicon SVG node. 
+func Mail() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M0 4v8c0 .55.45 1 1 1h12c.55 0 1-.45 1-1V4c0-.55-.45-1-1-1H1c-.55 0-1 .45-1 1zm13 0L7 9 1 4h12zM1 5.5l4 3-4 3v-6zM2 12l3.5-3L7 10.5 8.5 9l3.5 3H2zm11-.5l-4-3 4-3v6z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// MailRead returns an "mail-read" Octicon SVG node. +func MailRead() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M6 5H4V4h2v1zm3 1H4v1h5V6zm5-.48V14c0 .55-.45 1-1 1H1c-.55 0-1-.45-1-1V5.52c0-.33.16-.63.42-.81L2 3.58V3c0-.55.45-1 1-1h1.2L7 0l2.8 2H11c.55 0 1 .45 1 1v.58l1.58 1.13c.27.19.42.48.42.81zM3 7.5L7 10l4-2.5V3H3v4.5zm-2 6l4.5-3-4.5-3v6zm11 .5l-5-3-5 3h10zm1-6.5l-4.5 3 4.5 3v-6z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// MarkGitHub returns an "mark-github" Octicon SVG node. 
+func MarkGitHub() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0 0 16 8c0-4.42-3.58-8-8-8z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Markdown returns an "markdown" Octicon SVG node. +func Markdown() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M14.85 3H1.15C.52 3 0 3.52 0 4.15v7.69C0 12.48.52 13 1.15 13h13.69c.64 0 1.15-.52 1.15-1.15v-7.7C16 3.52 15.48 3 14.85 3zM9 11H7V8L5.5 9.92 4 8v3H2V5h2l1.5 2L7 5h2v6zm2.99.5L9.5 8H11V5h2v3h1.5l-2.51 3.5z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Megaphone returns an "megaphone" Octicon SVG node. 
+func Megaphone() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M10 1c-.17 0-.36.05-.52.14C8.04 2.02 4.5 4.58 3 5c-1.38 0-3 .67-3 2.5S1.63 10 3 10c.3.08.64.23 1 .41V15h2v-3.45c1.34.86 2.69 1.83 3.48 2.31.16.09.34.14.52.14.52 0 1-.42 1-1V2c0-.58-.48-1-1-1zm0 12c-.38-.23-.89-.58-1.5-1-.16-.11-.33-.22-.5-.34V3.31c.16-.11.31-.2.47-.31.61-.41 1.16-.77 1.53-1v11zm2-6h4v1h-4V7zm0 2l4 2v1l-4-2V9zm4-6v1l-4 2V5l4-2z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Mention returns an "mention" Octicon SVG node. +func Mention() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M6.58 15c1.25 0 2.52-.31 3.56-.94l-.42-.94c-.84.52-1.89.83-3.03.83-3.23 0-5.64-2.08-5.64-5.72 0-4.37 3.23-7.18 6.58-7.18 3.45 0 5.22 2.19 5.22 5.2 0 2.39-1.34 3.86-2.5 3.86-1.05 0-1.36-.73-1.05-2.19l.73-3.75H8.98l-.11.72c-.41-.63-.94-.83-1.56-.83-2.19 0-3.66 2.39-3.66 4.38 0 1.67.94 2.61 2.3 2.61.84 0 1.67-.53 2.3-1.25.11.94.94 1.45 1.98 1.45 1.67 0 3.77-1.67 3.77-5C14 2.61 11.59 0 7.83 0 3.66 0 0 3.33 0 8.33 0 12.71 2.92 15 6.58 15zm-.31-5c-.73 0-1.36-.52-1.36-1.67 0-1.45.94-3.22 2.41-3.22.52 0 .84.2 1.25.83l-.52 3.02c-.63.73-1.25 1.05-1.78 1.05V10z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Milestone returns an "milestone" Octicon SVG node. 
+func Milestone() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M8 2H6V0h2v2zm4 5H2c-.55 0-1-.45-1-1V4c0-.55.45-1 1-1h10l2 2-2 2zM8 4H6v2h2V4zM6 16h2V8H6v8z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Mirror returns an "mirror" Octicon SVG node. +func Mirror() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M15.5 4.7L8.5 0l-7 4.7c-.3.19-.5.45-.5.8V16l7.5-4 7.5 4V5.5c0-.34-.2-.61-.5-.8zm-.5 9.8l-6-3.25V10H8v1.25L2 14.5v-9l6-4V6h1V1.5l6 4v9zM6 7h5V5l3 3-3 3V9H6v2L3 8l3-3v2z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// MortarBoard returns an "mortar-board" Octicon SVG node. 
+func MortarBoard() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M7.83 9.19L4 8c-4-8 0 1.5 0 2.5S5.8 12 8 12s4-.5 4-1.5V8L8.17 9.19a.73.73 0 0 1-.36 0h.02zm.28-6.39a.34.34 0 0 0-.2 0L.27 5.18a.35.35 0 0 0 0 .67L2 6.4v1.77c-.3.17-.5.5-.5.86 0 .19.05.36.14.5-.08.14-.14.31-.14.5v2.58c0 .55 2 .55 2 0v-2.58c0-.19-.05-.36-.14-.5.08-.14.14-.31.14-.5 0-.38-.2-.69-.5-.86V6.72l4.89 1.53c.06.02.14.02.2 0l7.64-2.38a.35.35 0 0 0 0-.67L8.1 2.81l.01-.01zM8.02 6c-.55 0-1-.22-1-.5s.45-.5 1-.5 1 .22 1 .5-.45.5-1 .5z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Mute returns an "mute" Octicon SVG node. +func Mute() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M8 2.81v10.38c0 .67-.81 1-1.28.53L3 10H1c-.55 0-1-.45-1-1V7c0-.55.45-1 1-1h2l3.72-3.72C7.19 1.81 8 2.14 8 2.81zm7.53 3.22l-1.06-1.06-1.97 1.97-1.97-1.97-1.06 1.06L11.44 8 9.47 9.97l1.06 1.06 1.97-1.97 1.97 1.97 1.06-1.06L13.56 8l1.97-1.97z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// NoNewline returns an "no-newline" Octicon SVG node. 
+func NoNewline() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M16 5v3c0 .55-.45 1-1 1h-3v2L9 8l3-3v2h2V5h2zM8 8c0 2.2-1.8 4-4 4s-4-1.8-4-4 1.8-4 4-4 4 1.8 4 4zM1.5 9.66L5.66 5.5C5.18 5.19 4.61 5 4 5 2.34 5 1 6.34 1 8c0 .61.19 1.17.5 1.66zM7 8c0-.61-.19-1.17-.5-1.66L2.34 10.5c.48.31 1.05.5 1.66.5 1.66 0 3-1.34 3-3z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Note returns an "note" Octicon SVG node. +func Note() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M3 10h4V9H3v1zm0-2h6V7H3v1zm0-2h8V5H3v1zm10 6H1V3h12v9zM1 2c-.55 0-1 .45-1 1v9c0 .55.45 1 1 1h12c.55 0 1-.45 1-1V3c0-.55-.45-1-1-1H1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Octoface returns an "octoface" Octicon SVG node. 
+func Octoface() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M14.7 5.34c.13-.32.55-1.59-.13-3.31 0 0-1.05-.33-3.44 1.3-1-.28-2.07-.32-3.13-.32s-2.13.04-3.13.32c-2.39-1.64-3.44-1.3-3.44-1.3-.68 1.72-.26 2.99-.13 3.31C.49 6.21 0 7.33 0 8.69 0 13.84 3.33 15 7.98 15S16 13.84 16 8.69c0-1.36-.49-2.48-1.3-3.35zM8 14.02c-3.3 0-5.98-.15-5.98-3.35 0-.76.38-1.48 1.02-2.07 1.07-.98 2.9-.46 4.96-.46 2.07 0 3.88-.52 4.96.46.65.59 1.02 1.3 1.02 2.07 0 3.19-2.68 3.35-5.98 3.35zM5.49 9.01c-.66 0-1.2.8-1.2 1.78s.54 1.79 1.2 1.79c.66 0 1.2-.8 1.2-1.79s-.54-1.78-1.2-1.78zm5.02 0c-.66 0-1.2.79-1.2 1.78s.54 1.79 1.2 1.79c.66 0 1.2-.8 1.2-1.79s-.53-1.78-1.2-1.78z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Organization returns an "organization" Octicon SVG node. 
+func Organization() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M16 12.999c0 .439-.45 1-1 1H7.995c-.539 0-.994-.447-.995-.999H1c-.54 0-1-.561-1-1 0-2.634 3-4 3-4s.229-.409 0-1c-.841-.621-1.058-.59-1-3 .058-2.419 1.367-3 2.5-3s2.442.58 2.5 3c.058 2.41-.159 2.379-1 3-.229.59 0 1 0 1s1.549.711 2.42 2.088C9.196 9.369 10 8.999 10 8.999s.229-.409 0-1c-.841-.62-1.058-.59-1-3 .058-2.419 1.367-3 2.5-3s2.437.581 2.495 3c.059 2.41-.158 2.38-1 3-.229.59 0 1 0 1s3.005 1.366 3.005 4z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Package returns an "package" Octicon SVG node. +func Package() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M1 4.27v7.47c0 .45.3.84.75.97l6.5 1.73c.16.05.34.05.5 0l6.5-1.73c.45-.13.75-.52.75-.97V4.27c0-.45-.3-.84-.75-.97l-6.5-1.74a1.4 1.4 0 0 0-.5 0L1.75 3.3c-.45.13-.75.52-.75.97zm7 9.09l-6-1.59V5l6 1.61v6.75zM2 4l2.5-.67L11 5.06l-2.5.67L2 4zm13 7.77l-6 1.59V6.61l2-.55V8.5l2-.53V5.53L15 5v6.77zm-2-7.24L6.5 2.8l2-.53L15 4l-2 .53z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Paintcan returns an "paintcan" Octicon SVG node. 
+func Paintcan() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M6 0C2.69 0 0 2.69 0 6v1c0 .55.45 1 1 1v5c0 1.1 2.24 2 5 2s5-.9 5-2V8c.55 0 1-.45 1-1V6c0-3.31-2.69-6-6-6zm3 10v.5c0 .28-.22.5-.5.5s-.5-.22-.5-.5V10c0-.28-.22-.5-.5-.5s-.5.22-.5.5v2.5c0 .28-.22.5-.5.5s-.5-.22-.5-.5v-2c0-.28-.22-.5-.5-.5s-.5.22-.5.5v.5c0 .55-.45 1-1 1s-1-.45-1-1v-1c-.55 0-1-.45-1-1V7.2c.91.49 2.36.8 4 .8 1.64 0 3.09-.31 4-.8V9c0 .55-.45 1-1 1zM6 7c-1.68 0-3.12-.41-3.71-1C2.88 5.41 4.32 5 6 5c1.68 0 3.12.41 3.71 1-.59.59-2.03 1-3.71 1zm0-3c-2.76 0-5 .89-5 2 0-2.76 2.24-5 5-5s5 2.24 5 5c0-1.1-2.24-2-5-2z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Pencil returns an "pencil" Octicon SVG node. +func Pencil() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M0 12v3h3l8-8-3-3-8 8zm3 2H1v-2h1v1h1v1zm10.3-9.3L12 6 9 3l1.3-1.3a.996.996 0 0 1 1.41 0l1.59 1.59c.39.39.39 1.02 0 1.41z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Person returns an "person" Octicon SVG node. 
+func Person() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M12 14.002a.998.998 0 0 1-.998.998H1.001A1 1 0 0 1 0 13.999V13c0-2.633 4-4 4-4s.229-.409 0-1c-.841-.62-.944-1.59-1-4 .173-2.413 1.867-3 3-3s2.827.586 3 3c-.056 2.41-.159 3.38-1 4-.229.59 0 1 0 1s4 1.367 4 4v1.002z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Pin returns an "pin" Octicon SVG node. +func Pin() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M10 1.2V2l.5 1L6 6H2.2c-.44 0-.67.53-.34.86L5 10l-4 5 5-4 3.14 3.14a.5.5 0 0 0 .86-.34V10l3-4.5 1 .5h.8c.44 0 .67-.53.34-.86L10.86.86a.5.5 0 0 0-.86.34z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Plug returns an "plug" Octicon SVG node. 
+func Plug() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M14 6V5h-4V3H8v1H6c-1.03 0-1.77.81-2 2L3 7c-1.66 0-3 1.34-3 3v2h1v-2c0-1.11.89-2 2-2l1 1c.25 1.16.98 2 2 2h2v1h2v-2h4V9h-4V6h4z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Plus returns an "plus" Octicon SVG node. +func Plus() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M12 9H7v5H5V9H0V7h5V2h2v5h5v2z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// PlusSmall returns an "plus-small" Octicon SVG node. 
+func PlusSmall() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M4 4H3v3H0v1h3v3h1V8h3V7H4V4z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 7 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// PrimitiveDot returns an "primitive-dot" Octicon SVG node. +func PrimitiveDot() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M0 8c0-2.2 1.8-4 4-4s4 1.8 4 4-1.8 4-4 4-4-1.8-4-4z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 8 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// PrimitiveSquare returns an "primitive-square" Octicon SVG node. 
+func PrimitiveSquare() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M8 12H0V4h8v8z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 8 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Project returns an "project" Octicon SVG node. +func Project() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M10 12h3V2h-3v10zm-4-2h3V2H6v8zm-4 4h3V2H2v12zm-1 1h13V1H1v14zM14 0H1a1 1 0 0 0-1 1v14a1 1 0 0 0 1 1h13a1 1 0 0 0 1-1V1a1 1 0 0 0-1-1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 15 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Pulse returns an "pulse" Octicon SVG node. 
+func Pulse() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M11.5 8L8.8 5.4 6.6 8.5 5.5 1.6 2.38 8H0v2h3.6l.9-1.8.9 5.4L9 8.5l1.6 1.5H14V8h-2.5z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Question returns an "question" Octicon SVG node. +func Question() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M6 10h2v2H6v-2zm4-3.5C10 8.64 8 9 8 9H6c0-.55.45-1 1-1h.5c.28 0 .5-.22.5-.5v-1c0-.28-.22-.5-.5-.5h-1c-.28 0-.5.22-.5.5V7H4c0-1.5 1.5-3 3-3s3 1 3 2.5zM7 2.3c3.14 0 5.7 2.56 5.7 5.7s-2.56 5.7-5.7 5.7A5.71 5.71 0 0 1 1.3 8c0-3.14 2.56-5.7 5.7-5.7zM7 1C3.14 1 0 4.14 0 8s3.14 7 7 7 7-3.14 7-7-3.14-7-7-7z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Quote returns an "quote" Octicon SVG node. 
+func Quote() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M6.16 3.5C3.73 5.06 2.55 6.67 2.55 9.36c.16-.05.3-.05.44-.05 1.27 0 2.5.86 2.5 2.41 0 1.61-1.03 2.61-2.5 2.61-1.9 0-2.99-1.52-2.99-4.25 0-3.8 1.75-6.53 5.02-8.42L6.16 3.5zm7 0c-2.43 1.56-3.61 3.17-3.61 5.86.16-.05.3-.05.44-.05 1.27 0 2.5.86 2.5 2.41 0 1.61-1.03 2.61-2.5 2.61-1.89 0-2.98-1.52-2.98-4.25 0-3.8 1.75-6.53 5.02-8.42l1.14 1.84h-.01z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// RadioTower returns an "radio-tower" Octicon SVG node. +func RadioTower() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M4.79 6.11c.25-.25.25-.67 0-.92-.32-.33-.48-.76-.48-1.19 0-.43.16-.86.48-1.19.25-.26.25-.67 0-.92a.613.613 0 0 0-.45-.19c-.16 0-.33.06-.45.19-.57.58-.85 1.35-.85 2.11 0 .76.29 1.53.85 2.11.25.25.66.25.9 0zM2.33.52a.651.651 0 0 0-.92 0C.48 1.48.01 2.74.01 3.99c0 1.26.47 2.52 1.4 3.48.25.26.66.26.91 0s.25-.68 0-.94c-.68-.7-1.02-1.62-1.02-2.54 0-.92.34-1.84 1.02-2.54a.66.66 0 0 0 .01-.93zm5.69 5.1A1.62 1.62 0 1 0 6.4 4c-.01.89.72 1.62 1.62 1.62zM14.59.53a.628.628 0 0 0-.91 0c-.25.26-.25.68 0 .94.68.7 1.02 1.62 1.02 2.54 0 .92-.34 1.83-1.02 2.54-.25.26-.25.68 0 .94a.651.651 0 0 0 .92 0c.93-.96 1.4-2.22 1.4-3.48A5.048 5.048 0 0 0 14.59.53zM8.02 6.92c-.41 0-.83-.1-1.2-.3l-3.15 8.37h1.49l.86-1h4l.84 1h1.49L9.21 6.62c-.38.2-.78.3-1.19.3zm-.01.48L9.02 11h-2l.99-3.6zm-1.99 5.59l1-1h2l1 1h-4zm5.19-11.1c-.25.25-.25.67 0 .92.32.33.48.76.48 1.19 0 .43-.16.86-.48 1.19-.25.26-.25.67 0 .92a.63.63 0 0 0 .9 0c.57-.58.85-1.35.85-2.11 0-.76-.28-1.53-.85-2.11a.634.634 0 0 0-.9 0z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + 
{ + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Reply returns an "reply" Octicon SVG node. +func Reply() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M6 3.5c3.92.44 8 3.125 8 10-2.312-5.062-4.75-6-8-6V11L.5 5.5 6 0v3.5z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Repo returns an "repo" Octicon SVG node. +func Repo() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M4 9H3V8h1v1zm0-3H3v1h1V6zm0-2H3v1h1V4zm0-2H3v1h1V2zm8-1v12c0 .55-.45 1-1 1H6v2l-1.5-1.5L3 16v-2H1c-.55 0-1-.45-1-1V1c0-.55.45-1 1-1h10c.55 0 1 .45 1 1zm-1 10H1v2h2v-1h3v1h5v-2zm0-10H2v9h9V1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// RepoClone returns an "repo-clone" Octicon SVG node. 
+func RepoClone() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M15 0H9v7c0 .55.45 1 1 1h1v1h1V8h3c.55 0 1-.45 1-1V1c0-.55-.45-1-1-1zm-4 7h-1V6h1v1zm4 0h-3V6h3v1zm0-2h-4V1h4v4zM4 5H3V4h1v1zm0-2H3V2h1v1zM2 1h6V0H1C.45 0 0 .45 0 1v12c0 .55.45 1 1 1h2v2l1.5-1.5L6 16v-2h5c.55 0 1-.45 1-1v-3H2V1zm9 10v2H6v-1H3v1H1v-2h10zM3 8h1v1H3V8zm1-1H3V6h1v1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// RepoForcePush returns an "repo-force-push" Octicon SVG node. +func RepoForcePush() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M10 9H8v7H6V9H4l2.25-3H4l3-4 3 4H7.75L10 9zm1-9H1C.45 0 0 .45 0 1v12c0 .55.45 1 1 1h4v-1H1v-2h4v-1H2V1h9v9H9v1h2v2H9v1h2c.55 0 1-.45 1-1V1c0-.55-.45-1-1-1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// RepoForked returns an "repo-forked" Octicon SVG node. 
+func RepoForked() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M8 1a1.993 1.993 0 0 0-1 3.72V6L5 8 3 6V4.72A1.993 1.993 0 0 0 2 1a1.993 1.993 0 0 0-1 3.72V6.5l3 3v1.78A1.993 1.993 0 0 0 5 15a1.993 1.993 0 0 0 1-3.72V9.5l3-3V4.72A1.993 1.993 0 0 0 8 1zM2 4.2C1.34 4.2.8 3.65.8 3c0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2zm3 10c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2zm3-10c-.66 0-1.2-.55-1.2-1.2 0-.65.55-1.2 1.2-1.2.65 0 1.2.55 1.2 1.2 0 .65-.55 1.2-1.2 1.2z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 10 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// RepoPull returns an "repo-pull" Octicon SVG node. +func RepoPull() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M13 8V6H7V4h6V2l3 3-3 3zM4 2H3v1h1V2zm7 5h1v6c0 .55-.45 1-1 1H6v2l-1.5-1.5L3 16v-2H1c-.55 0-1-.45-1-1V1c0-.55.45-1 1-1h10c.55 0 1 .45 1 1v2h-1V1H2v9h9V7zm0 4H1v2h2v-1h3v1h5v-2zM4 6H3v1h1V6zm0-2H3v1h1V4zM3 9h1V8H3v1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// RepoPush returns an "repo-push" Octicon SVG node. 
+func RepoPush() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M4 3H3V2h1v1zM3 5h1V4H3v1zm4 0L4 9h2v7h2V9h2L7 5zm4-5H1C.45 0 0 .45 0 1v12c0 .55.45 1 1 1h4v-1H1v-2h4v-1H2V1h9.02L11 10H9v1h2v2H9v1h2c.55 0 1-.45 1-1V1c0-.55-.45-1-1-1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Report returns an "report" Octicon SVG node. +func Report() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M0 2a1 1 0 0 1 1-1h14a1 1 0 0 1 1 1v9a1 1 0 0 1-1 1H7l-4 4v-4H1a1 1 0 0 1-1-1V2zm1 0h14v9H6.5L4 13.5V11H1V2zm6 6h2v2H7V8zm0-5h2v4H7V3z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Rocket returns an "rocket" Octicon SVG node. 
+func Rocket() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M12.17 3.83c-.27-.27-.47-.55-.63-.88-.16-.31-.27-.66-.34-1.02-.58.33-1.16.7-1.73 1.13-.58.44-1.14.94-1.69 1.48-.7.7-1.33 1.81-1.78 2.45H3L0 10h3l2-2c-.34.77-1.02 2.98-1 3l1 1c.02.02 2.23-.64 3-1l-2 2v3l3-3v-3c.64-.45 1.75-1.09 2.45-1.78.55-.55 1.05-1.13 1.47-1.7.44-.58.81-1.16 1.14-1.72-.36-.08-.7-.19-1.03-.34a3.39 3.39 0 0 1-.86-.63zM16 0s-.09.38-.3 1.06c-.2.7-.55 1.58-1.06 2.66-.7-.08-1.27-.33-1.66-.72-.39-.39-.63-.94-.7-1.64C13.36.84 14.23.48 14.92.28 15.62.08 16 0 16 0z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// RSS returns an "rss" Octicon SVG node. +func RSS() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M2 13H0v-2c1.11 0 2 .89 2 2zM0 3v1a9 9 0 0 1 9 9h1C10 7.48 5.52 3 0 3zm0 4v1c2.75 0 5 2.25 5 5h1c0-3.31-2.69-6-6-6z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 10 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Ruby returns an "ruby" Octicon SVG node. 
+func Ruby() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M13 6l-5 5V4h3l2 2zm3 0l-8 8-8-8 4-4h8l4 4zm-8 6.5L14.5 6l-3-3h-7l-3 3L8 12.5z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// ScreenFull returns an "screen-full" Octicon SVG node. +func ScreenFull() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M13 10h1v3c0 .547-.453 1-1 1h-3v-1h3v-3zM1 10H0v3c0 .547.453 1 1 1h3v-1H1v-3zm0-7h3V2H1c-.547 0-1 .453-1 1v3h1V3zm1 1h10v8H2V4zm2 6h6V6H4v4zm6-8v1h3v3h1V3c0-.547-.453-1-1-1h-3z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// ScreenNormal returns an "screen-normal" Octicon SVG node. 
+func ScreenNormal() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M2 4H0V3h2V1h1v2c0 .547-.453 1-1 1zm0 8H0v1h2v2h1v-2c0-.547-.453-1-1-1zm9-2c0 .547-.453 1-1 1H4c-.547 0-1-.453-1-1V6c0-.547.453-1 1-1h6c.547 0 1 .453 1 1v4zM9 7H5v2h4V7zm2 6v2h1v-2h2v-1h-2c-.547 0-1 .453-1 1zm1-10V1h-1v2c0 .547.453 1 1 1h2V3h-2z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Search returns an "search" Octicon SVG node. +func Search() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M15.7 13.3l-3.81-3.83A5.93 5.93 0 0 0 13 6c0-3.31-2.69-6-6-6S1 2.69 1 6s2.69 6 6 6c1.3 0 2.48-.41 3.47-1.11l3.83 3.81c.19.2.45.3.7.3.25 0 .52-.09.7-.3a.996.996 0 0 0 0-1.41v.01zM7 10.7c-2.59 0-4.7-2.11-4.7-4.7 0-2.59 2.11-4.7 4.7-4.7 2.59 0 4.7 2.11 4.7 4.7 0 2.59-2.11 4.7-4.7 4.7z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Server returns an "server" Octicon SVG node. 
+func Server() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M11 6H1c-.55 0-1 .45-1 1v2c0 .55.45 1 1 1h10c.55 0 1-.45 1-1V7c0-.55-.45-1-1-1zM2 9H1V7h1v2zm2 0H3V7h1v2zm2 0H5V7h1v2zm2 0H7V7h1v2zm3-8H1c-.55 0-1 .45-1 1v2c0 .55.45 1 1 1h10c.55 0 1-.45 1-1V2c0-.55-.45-1-1-1zM2 4H1V2h1v2zm2 0H3V2h1v2zm2 0H5V2h1v2zm2 0H7V2h1v2zm3-1h-1V2h1v1zm0 8H1c-.55 0-1 .45-1 1v2c0 .55.45 1 1 1h10c.55 0 1-.45 1-1v-2c0-.55-.45-1-1-1zm-9 3H1v-2h1v2zm2 0H3v-2h1v2zm2 0H5v-2h1v2zm2 0H7v-2h1v2z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Settings returns an "settings" Octicon SVG node. +func Settings() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M4 7H3V2h1v5zm-1 7h1v-3H3v3zm5 0h1V8H8v6zm5 0h1v-2h-1v2zm1-12h-1v6h1V2zM9 2H8v2h1V2zM5 8H2c-.55 0-1 .45-1 1s.45 1 1 1h3c.55 0 1-.45 1-1s-.45-1-1-1zm5-3H7c-.55 0-1 .45-1 1s.45 1 1 1h3c.55 0 1-.45 1-1s-.45-1-1-1zm5 4h-3c-.55 0-1 .45-1 1s.45 1 1 1h3c.55 0 1-.45 1-1s-.45-1-1-1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Shield returns an "shield" Octicon SVG node. 
+func Shield() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M7 0L0 2v6.02C0 12.69 5.31 16 7 16c1.69 0 7-3.31 7-7.98V2L7 0zM5 11l1.14-2.8a.568.568 0 0 0-.25-.59C5.33 7.25 5 6.66 5 6c0-1.09.89-2 1.98-2C8.06 4 9 4.91 9 6c0 .66-.33 1.25-.89 1.61-.19.13-.3.36-.25.59L9 11H5z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// SignIn returns an "sign-in" Octicon SVG node. +func SignIn() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M7 6.75V12h4V8h1v4c0 .55-.45 1-1 1H7v3l-5.45-2.72c-.33-.17-.55-.52-.55-.91V1c0-.55.45-1 1-1h9c.55 0 1 .45 1 1v3h-1V1H3l4 2v2.25L10 3v2h4v2h-4v2L7 6.75z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// SignOut returns an "sign-out" Octicon SVG node. 
+func SignOut() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M11.992 8.994V6.996H7.995v-2h3.997V2.999l3.998 2.998-3.998 2.998zm-1.998 2.998H5.996V2.998L2 1h7.995v2.998h1V1c0-.55-.45-.999-1-.999H.999A1.001 1.001 0 0 0 0 1v11.372c0 .39.22.73.55.91L5.996 16v-3.008h3.998c.55 0 1-.45 1-1V7.995h-1v3.997z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Smiley returns an "smiley" Octicon SVG node. +func Smiley() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M8 0C3.58 0 0 3.58 0 8s3.58 8 8 8 8-3.58 8-8-3.58-8-8-8zm4.81 12.81a6.72 6.72 0 0 1-2.17 1.45c-.83.36-1.72.53-2.64.53-.92 0-1.81-.17-2.64-.53-.81-.34-1.55-.83-2.17-1.45a6.773 6.773 0 0 1-1.45-2.17A6.59 6.59 0 0 1 1.21 8c0-.92.17-1.81.53-2.64.34-.81.83-1.55 1.45-2.17.62-.62 1.36-1.11 2.17-1.45A6.59 6.59 0 0 1 8 1.21c.92 0 1.81.17 2.64.53.81.34 1.55.83 2.17 1.45.62.62 1.11 1.36 1.45 2.17.36.83.53 1.72.53 2.64 0 .92-.17 1.81-.53 2.64-.34.81-.83 1.55-1.45 2.17zM4 6.8v-.59c0-.66.53-1.19 1.2-1.19h.59c.66 0 1.19.53 1.19 1.19v.59c0 .67-.53 1.2-1.19 1.2H5.2C4.53 8 4 7.47 4 6.8zm5 0v-.59c0-.66.53-1.19 1.2-1.19h.59c.66 0 1.19.53 1.19 1.19v.59c0 .67-.53 1.2-1.19 1.2h-.59C9.53 8 9 7.47 9 6.8zm4 3.2c-.72 1.88-2.91 3-5 3s-4.28-1.13-5-3c-.14-.39.23-1 .66-1h8.59c.41 0 .89.61.75 1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + 
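Every function above follows the same generated pattern: it hand-builds a two-node tree, an <svg> element (the (html.NodeType)(3) literal is html.ElementNode, and the DataAtom value corresponds to atom.Svg) whose single <path> child carries the icon outline in its "d" attribute. As a minimal sketch of how such a helper might be consumed (assuming this vendored package is shurcooL/octicon, whose API these signatures match; the import path is an assumption, not stated in the diff), the returned node can be serialized back to SVG markup with golang.org/x/net/html.Render:

package main

import (
	"bytes"
	"fmt"

	"github.com/shurcooL/octicon" // assumed import path for the vendored package above
	"golang.org/x/net/html"
)

func main() {
	// Each helper returns a ready-made *html.Node; html.Render serializes
	// the element and its <path> child back into markup.
	var buf bytes.Buffer
	if err := html.Render(&buf, octicon.Search()); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // <svg xmlns="http://www.w3.org/2000/svg" width="16" ...><path d="M15.7 13.3l..."></path></svg>
}

The fully qualified zero values in the generated literals ((*html.Node)(nil), (atom.Atom)(0), (string)("")) are an artifact of the generator dumping each node as a complete Go struct literal; hand-written code would simply omit them.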
+// Squirrel returns an "squirrel" Octicon SVG node. +func Squirrel() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M12 1C9.79 1 8 2.31 8 3.92c0 1.94.5 3.03 0 6.08 0-4.5-2.77-6.34-4-6.34.05-.5-.48-.66-.48-.66s-.22.11-.3.34c-.27-.31-.56-.27-.56-.27l-.13.58S.7 4.29.68 6.87c.2.33 1.53.6 2.47.43.89.05.67.79.47.99C2.78 9.13 2 8 1 8S0 9 1 9s1 1 3 1c-3.09 1.2 0 4 0 4H3c-1 0-1 1-1 1h6c3 0 5-1 5-3.47 0-.85-.43-1.79-1-2.53-1.11-1.46.23-2.68 1-2 .77.68 3 1 3-2 0-2.21-1.79-4-4-4zM2.5 6c-.28 0-.5-.22-.5-.5s.22-.5.5-.5.5.22.5.5-.22.5-.5.5z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Star returns an "star" Octicon SVG node. +func Star() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M14 6l-4.9-.64L7 1 4.9 5.36 0 6l3.6 3.26L2.67 14 7 11.67 11.33 14l-.93-4.74L14 6z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Stop returns an "stop" Octicon SVG node. 
+func Stop() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M10 1H4L0 5v6l4 4h6l4-4V5l-4-4zm3 9.5L9.5 14h-5L1 10.5v-5L4.5 2h5L13 5.5v5zM6 4h2v5H6V4zm0 6h2v2H6v-2z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Sync returns an "sync" Octicon SVG node. +func Sync() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M10.24 7.4a4.15 4.15 0 0 1-1.2 3.6 4.346 4.346 0 0 1-5.41.54L4.8 10.4.5 9.8l.6 4.2 1.31-1.26c2.36 1.74 5.7 1.57 7.84-.54a5.876 5.876 0 0 0 1.74-4.46l-1.75-.34zM2.96 5a4.346 4.346 0 0 1 5.41-.54L7.2 5.6l4.3.6-.6-4.2-1.31 1.26c-2.36-1.74-5.7-1.57-7.85.54C.5 5.03-.06 6.65.01 8.26l1.75.35A4.17 4.17 0 0 1 2.96 5z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Tag returns an "tag" Octicon SVG node. 
+func Tag() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M7.685 1.72a2.49 2.49 0 0 0-1.76-.726H3.48A2.5 2.5 0 0 0 .994 3.48v2.456c0 .656.269 1.292.726 1.76l6.024 6.024a.99.99 0 0 0 1.402 0l4.563-4.563a.99.99 0 0 0 0-1.402L7.685 1.72zM2.366 7.048a1.54 1.54 0 0 1-.467-1.123V3.48c0-.874.716-1.58 1.58-1.58h2.456c.418 0 .825.159 1.123.467l6.104 6.094-4.702 4.702-6.094-6.114zm.626-4.066h1.989v1.989H2.982V2.982h.01z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Tasklist returns an "tasklist" Octicon SVG node. +func Tasklist() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M15.41 9H7.59C7 9 7 8.59 7 8c0-.59 0-1 .59-1h7.81c.59 0 .59.41.59 1 0 .59 0 1-.59 1h.01zM9.59 4C9 4 9 3.59 9 3c0-.59 0-1 .59-1h5.81c.59 0 .59.41.59 1 0 .59 0 1-.59 1H9.59zM0 3.91l1.41-1.3L3 4.2 7.09 0 8.5 1.41 3 6.91l-3-3zM7.59 12h7.81c.59 0 .59.41.59 1 0 .59 0 1-.59 1H7.59C7 14 7 13.59 7 13c0-.59 0-1 .59-1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Telescope returns an "telescope" Octicon SVG node. 
+func Telescope() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M8 9l3 6h-1l-2-4v5H7v-6l-2 5H4l2-5 2-1zM7 0H6v1h1V0zM5 3H4v1h1V3zM2 1H1v1h1V1zM.63 9a.52.52 0 0 0-.16.67l.55.92c.13.23.41.31.64.2l1.39-.66-1.16-2-1.27.86.01.01zm7.89-5.39l-5.8 3.95L3.95 9.7l6.33-3.03-1.77-3.06h.01zm4.22 1.28l-1.47-2.52a.51.51 0 0 0-.72-.17l-1.2.83 1.84 3.2 1.33-.64c.27-.13.36-.44.22-.7z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Terminal returns an "terminal" Octicon SVG node. +func Terminal() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M7 10h4v1H7v-1zm-3 1l3-3-3-3-.75.75L5.5 8l-2.25 2.25L4 11zm10-8v10c0 .55-.45 1-1 1H1c-.55 0-1-.45-1-1V3c0-.55.45-1 1-1h12c.55 0 1 .45 1 1zm-1 0H1v10h12V3z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// TextSize returns an "text-size" Octicon SVG node. 
+func TextSize() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M13.62 9.08L12.1 3.66h-.06l-1.5 5.42h3.08zM5.7 10.13S4.68 6.52 4.53 6.02h-.08l-1.13 4.11H5.7zM17.31 14h-2.25l-.95-3.25h-4.07L9.09 14H6.84l-.69-2.33H2.87L2.17 14H0l3.3-9.59h2.5l2.17 6.34L10.86 2h2.52l3.94 12h-.01z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 18 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// ThreeBars returns an "three-bars" Octicon SVG node. +func ThreeBars() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M11.41 9H.59C0 9 0 8.59 0 8c0-.59 0-1 .59-1H11.4c.59 0 .59.41.59 1 0 .59 0 1-.59 1h.01zm0-4H.59C0 5 0 4.59 0 4c0-.59 0-1 .59-1H11.4c.59 0 .59.41.59 1 0 .59 0 1-.59 1h.01zM.59 11H11.4c.59 0 .59.41.59 1 0 .59 0 1-.59 1H.59C0 13 0 12.59 0 12c0-.59 0-1 .59-1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Thumbsdown returns an "thumbsdown" Octicon SVG node. 
+func Thumbsdown() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M15.97 7.825L15 1.88C14.83.499 13.123 0 11.993 0H5.686c-.2 0-.38.05-.53.14L3.719 1h-1.72C.94 1 0 1.938 0 2.997v3.998c0 1.059.94 2.018 1.999 1.998h1.998c.91 0 1.39.45 2.389 1.55.91.999.88 1.798.63 3.267-.08.5.06 1 .42 1.42.39.47.979.769 1.558.769 1.83 0 2.998-3.718 2.998-5.017l-.02-.98h2.04c1.159 0 1.948-.799 1.978-1.968 0-.06.02-.13-.02-.2v-.01zm-1.969 1.19h-1.989c-.7 0-1.029.28-1.029.969l.03 1.03c0 1.268-1.17 3.997-1.999 3.997-.5 0-1.079-.5-.999-1 .25-1.579.34-2.778-.89-4.137-1.019-1.13-1.768-1.879-3.127-1.879V1.999l1.668-1h6.326c.73 0 1.95.31 2 1l.02.02.999 5.996c-.03.64-.38 1-1 1h-.01z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Thumbsup returns an "thumbsup" Octicon SVG node. 
+func Thumbsup() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M13.991 13.991c-.05.69-1.269 1-1.998 1H5.665l-1.669-1V7.995c1.36 0 2.11-.75 3.129-1.879 1.229-1.359 1.139-2.558.879-4.127-.08-.5.5-1 1-1 .829 0 1.998 2.729 1.998 3.998l-.02 1.03c0 .689.33.969 1.02.969H14c.63 0 .98.36 1 .999l-1 5.996-.01.01zm0-7.995h-2.018l.02-.98C11.992 3.719 10.822 0 8.993 0c-.58 0-1.169.3-1.559.77-.36.41-.5.909-.42 1.409.25 1.479.28 2.278-.629 3.278-1 1.089-1.48 1.549-2.389 1.549H2c-1.061-.01-2 .929-2 1.988v3.998c0 1.06.94 1.999 1.999 1.999h1.719l1.439.86c.16.089.33.139.52.139h6.325c1.13 0 2.839-.5 2.999-1.879l.979-5.946c.02-.08.02-.14.02-.2-.03-1.17-.84-1.969-1.999-1.969h-.01z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Tools returns an "tools" Octicon SVG node. 
+func Tools() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M4.48 7.27c.26.26 1.28 1.33 1.28 1.33l.56-.58-.88-.91 1.69-1.8s-.76-.74-.43-.45c.32-1.19.03-2.51-.87-3.44C4.93.5 3.66.2 2.52.51l1.93 2-.51 1.96-1.89.52-1.93-2C-.19 4.17.1 5.48 1 6.4c.94.98 2.29 1.26 3.48.87zm6.44 1.94l-2.33 2.3 3.84 3.98c.31.33.73.49 1.14.49.41 0 .82-.16 1.14-.49.63-.65.63-1.7 0-2.35l-3.79-3.93zM16 2.53L13.55 0 6.33 7.46l.88.91-4.31 4.46-.99.53-1.39 2.27.35.37 2.2-1.44.51-1.02L7.9 9.08l.88.91L16 2.53z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Trashcan returns an "trashcan" Octicon SVG node. +func Trashcan() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M11 2H9c0-.55-.45-1-1-1H5c-.55 0-1 .45-1 1H2c-.55 0-1 .45-1 1v1c0 .55.45 1 1 1v9c0 .55.45 1 1 1h7c.55 0 1-.45 1-1V5c.55 0 1-.45 1-1V3c0-.55-.45-1-1-1zm-1 12H3V5h1v8h1V5h1v8h1V5h1v8h1V5h1v9zm1-10H2V3h9v1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// TriangleDown returns an "triangle-down" Octicon SVG node. 
+func TriangleDown() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M0 5l6 6 6-6H0z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// TriangleLeft returns an "triangle-left" Octicon SVG node. +func TriangleLeft() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M6 2L0 8l6 6V2z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 6 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// TriangleRight returns an "triangle-right" Octicon SVG node. 
+func TriangleRight() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M0 14l6-6-6-6v12z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 6 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// TriangleUp returns an "triangle-up" Octicon SVG node. +func TriangleUp() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M12 11L6 5l-6 6h12z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Unfold returns an "unfold" Octicon SVG node. 
+func Unfold() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M11.5 7.5L14 10c0 .55-.45 1-1 1H9v-1h3.5l-2-2h-7l-2 2H5v1H1c-.55 0-1-.45-1-1l2.5-2.5L0 5c0-.55.45-1 1-1h4v1H1.5l2 2h7l2-2H9V4h4c.55 0 1 .45 1 1l-2.5 2.5zM6 6h2V3h2L7 0 4 3h2v3zm2 3H6v3H4l3 3 3-3H8V9z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Unmute returns an "unmute" Octicon SVG node. +func Unmute() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M12 8.02c0 1.09-.45 2.09-1.17 2.83l-.67-.67c.55-.56.89-1.31.89-2.16 0-.85-.34-1.61-.89-2.16l.67-.67A3.99 3.99 0 0 1 12 8.02zM7.72 2.28L4 6H2c-.55 0-1 .45-1 1v2c0 .55.45 1 1 1h2l3.72 3.72c.47.47 1.28.14 1.28-.53V2.81c0-.67-.81-1-1.28-.53zm5.94.08l-.67.67a6.996 6.996 0 0 1 2.06 4.98c0 1.94-.78 3.7-2.06 4.98l.67.67A7.973 7.973 0 0 0 16 8c0-2.22-.89-4.22-2.34-5.66v.02zm-1.41 1.41l-.69.67a5.05 5.05 0 0 1 1.48 3.58c0 1.39-.56 2.66-1.48 3.56l.69.67A5.971 5.971 0 0 0 14 8.02c0-1.65-.67-3.16-1.75-4.25z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Unverified returns an "unverified" Octicon SVG node. 
+func Unverified() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M15.67 7.066l-1.08-1.34a1.5 1.5 0 0 1-.309-.77l-.19-1.698a1.51 1.51 0 0 0-1.329-1.33l-1.699-.19c-.3-.03-.56-.159-.78-.329L8.945.33a1.504 1.504 0 0 0-1.878 0l-1.34 1.08a1.5 1.5 0 0 1-.77.31l-1.698.19c-.7.08-1.25.63-1.33 1.329l-.19 1.699c-.03.3-.159.56-.329.78L.33 7.055a1.504 1.504 0 0 0 0 1.878l1.08 1.34c.17.22.28.48.31.77l.19 1.698c.08.7.63 1.25 1.329 1.33l1.699.19c.3.03.56.159.78.329l1.339 1.08c.55.439 1.329.439 1.878 0l1.34-1.08c.22-.17.48-.28.77-.31l1.698-.19c.7-.08 1.25-.63 1.33-1.329l.19-1.699c.03-.3.159-.56.329-.78l1.08-1.339a1.504 1.504 0 0 0 0-1.878zm-6.666 4.437c0 .28-.22.5-.5.5h-.999c-.27 0-.5-.22-.5-.5v-1c0-.28.23-.5.5-.5h1c.28 0 .5.22.5.5v1zm1.56-4.887c-.06.17-.17.33-.3.47-.13.16-.14.19-.33.38-.16.17-.31.3-.52.449-.11.09-.2.19-.28.27-.08.08-.14.17-.19.27-.05.1-.08.19-.11.3-.03.11-.03.13-.03.25H7.136c0-.22 0-.31.03-.48.03-.19.08-.36.14-.52.06-.14.14-.28.25-.42.11-.13.23-.25.409-.38.27-.19.36-.3.48-.52.12-.219.2-.379.2-.589 0-.27-.06-.45-.2-.58-.13-.13-.31-.19-.58-.19-.09 0-.19.02-.3.05-.11.03-.17.09-.25.16-.08.07-.14.11-.2.2a.41.41 0 0 0-.09.28H5.028c0-.38.13-.56.27-.83.16-.27.36-.499.61-.669.25-.17.549-.3.879-.38.33-.08.7-.13 1.09-.13.439 0 .829.05 1.168.13.34.09.63.22.88.39.23.17.41.38.55.63.13.25.19.55.19.88 0 .22 0 .419-.08.589l-.02-.01z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Verified returns an "verified" Octicon SVG node. 
+func Verified() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M15.67 7.066l-1.08-1.34a1.5 1.5 0 0 1-.309-.77l-.19-1.698a1.51 1.51 0 0 0-1.329-1.33l-1.699-.19c-.3-.03-.56-.159-.78-.329L8.945.33a1.504 1.504 0 0 0-1.878 0l-1.34 1.08a1.5 1.5 0 0 1-.77.31l-1.698.19c-.7.08-1.25.63-1.33 1.329l-.19 1.699c-.03.3-.159.56-.329.78L.33 7.055a1.504 1.504 0 0 0 0 1.878l1.08 1.34c.17.22.28.48.31.77l.19 1.698c.08.7.63 1.25 1.329 1.33l1.699.19c.3.03.56.159.78.329l1.339 1.08c.55.439 1.329.439 1.878 0l1.34-1.08c.22-.17.48-.28.77-.31l1.698-.19c.7-.08 1.25-.63 1.33-1.329l.19-1.699c.03-.3.159-.56.329-.78l1.08-1.339a1.504 1.504 0 0 0 0-1.878zm-9.164 4.936L3.008 8.505l1.5-1.5 1.998 2 4.997-4.998 1.499 1.55-6.496 6.445z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 16 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Versions returns an "versions" Octicon SVG node. +func Versions() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M13 3H7c-.55 0-1 .45-1 1v8c0 .55.45 1 1 1h6c.55 0 1-.45 1-1V4c0-.55-.45-1-1-1zm-1 8H8V5h4v6zM4 4h1v1H4v6h1v1H4c-.55 0-1-.45-1-1V5c0-.55.45-1 1-1zM1 5h1v1H1v4h1v1H1c-.55 0-1-.45-1-1V6c0-.55.45-1 1-1z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 14 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Watch returns an "watch" Octicon SVG node. 
+func Watch() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M6 8h2v1H5V5h1v3zm6 0c0 2.22-1.2 4.16-3 5.19V15c0 .55-.45 1-1 1H4c-.55 0-1-.45-1-1v-1.81C1.2 12.16 0 10.22 0 8s1.2-4.16 3-5.19V1c0-.55.45-1 1-1h4c.55 0 1 .45 1 1v1.81c1.8 1.03 3 2.97 3 5.19zm-1 0c0-2.77-2.23-5-5-5S1 5.23 1 8s2.23 5 5 5 5-2.23 5-5z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// X returns an "x" Octicon SVG node. +func X() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M7.48 8l3.75 3.75-1.48 1.48L6 9.48l-3.75 3.75-1.48-1.48L4.52 8 .77 4.25l1.48-1.48L6 6.52l3.75-3.75 1.48 1.48L7.48 8z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 12 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} + +// Zap returns an "zap" Octicon SVG node. 
+func Zap() *html.Node { + return &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: &html.Node{ + Parent: (*html.Node)(nil), + FirstChild: (*html.Node)(nil), + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(0), + Data: (string)("path"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("d"), + Val: (string)("M10 7H6l3-7-9 9h4l-3 7 9-9z"), + }, + }, + }, + LastChild: (*html.Node)(nil), + PrevSibling: (*html.Node)(nil), + NextSibling: (*html.Node)(nil), + Type: (html.NodeType)(3), + DataAtom: (atom.Atom)(462339), + Data: (string)("svg"), + Namespace: (string)("svg"), + Attr: []html.Attribute{ + { + Namespace: (string)(""), + Key: (string)("xmlns"), + Val: (string)("http://www.w3.org/2000/svg"), + }, + { + Namespace: (string)(""), + Key: (string)("width"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("height"), + Val: (string)("16"), + }, + { + Namespace: (string)(""), + Key: (string)("viewBox"), + Val: (string)("0 0 10 16"), + }, + { + Namespace: (string)(""), + Key: (string)("style"), + Val: (string)("fill: currentColor; vertical-align: top;"), + }, + }, + } +} diff --git a/vendor/github.com/shurcooL/sanitized_anchor_name/.travis.yml b/vendor/github.com/shurcooL/sanitized_anchor_name/.travis.yml new file mode 100644 index 0000000..93b1fcd --- /dev/null +++ b/vendor/github.com/shurcooL/sanitized_anchor_name/.travis.yml @@ -0,0 +1,16 @@ +sudo: false +language: go +go: + - 1.x + - master +matrix: + allow_failures: + - go: master + fast_finish: true +install: + - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d -s .) + - go tool vet . + - go test -v -race ./... diff --git a/vendor/github.com/shurcooL/sanitized_anchor_name/LICENSE b/vendor/github.com/shurcooL/sanitized_anchor_name/LICENSE new file mode 100644 index 0000000..c35c17a --- /dev/null +++ b/vendor/github.com/shurcooL/sanitized_anchor_name/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2015 Dmitri Shuralyov + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
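The Octicon constructors above (`Telescope` through `Zap`) all follow one shape: each call builds a fresh `*html.Node` tree holding a single `svg` element that wraps a `path`. Below is a minimal sketch of rendering one of these nodes back to markup with `golang.org/x/net/html`, assuming the vendored package is importable as `octicons` (the package clause sits outside this hunk, so the import path is illustrative only):

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/html"

	// Assumed import path for illustration only; the package that defines
	// Telescope, Zap, etc. is declared outside this hunk.
	octicons "github.com/shurcooL/octicons"
)

func main() {
	// Each constructor returns a fully populated *html.Node;
	// html.Render serializes it back to SVG markup.
	var buf bytes.Buffer
	if err := html.Render(&buf, octicons.Zap()); err != nil {
		log.Fatal(err)
	}
	fmt.Println(buf.String())
	// Expected shape: <svg xmlns="http://www.w3.org/2000/svg" width="16" ...><path d="M10 7H6l3-7-9 9h4l-3 7 9-9z"></path></svg>
}
```

Because every call allocates a new tree, a caller can freely mutate the returned node, for example by appending an extra `html.Attribute`, without affecting other call sites.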
diff --git a/vendor/github.com/shurcooL/sanitized_anchor_name/README.md b/vendor/github.com/shurcooL/sanitized_anchor_name/README.md new file mode 100644 index 0000000..670bf0f --- /dev/null +++ b/vendor/github.com/shurcooL/sanitized_anchor_name/README.md @@ -0,0 +1,36 @@ +sanitized_anchor_name +===================== + +[![Build Status](https://travis-ci.org/shurcooL/sanitized_anchor_name.svg?branch=master)](https://travis-ci.org/shurcooL/sanitized_anchor_name) [![GoDoc](https://godoc.org/github.com/shurcooL/sanitized_anchor_name?status.svg)](https://godoc.org/github.com/shurcooL/sanitized_anchor_name) + +Package sanitized_anchor_name provides a func to create sanitized anchor names. + +Its logic can be reused by multiple packages to create interoperable anchor names +and links to those anchors. + +At this time, it does not try to ensure that generated anchor names +are unique, that responsibility falls on the caller. + +Installation +------------ + +```bash +go get -u github.com/shurcooL/sanitized_anchor_name +``` + +Example +------- + +```Go +anchorName := sanitized_anchor_name.Create("This is a header") + +fmt.Println(anchorName) + +// Output: +// this-is-a-header +``` + +License +------- + +- [MIT License](LICENSE) diff --git a/vendor/github.com/shurcooL/sanitized_anchor_name/go.mod b/vendor/github.com/shurcooL/sanitized_anchor_name/go.mod new file mode 100644 index 0000000..1e25534 --- /dev/null +++ b/vendor/github.com/shurcooL/sanitized_anchor_name/go.mod @@ -0,0 +1 @@ +module github.com/shurcooL/sanitized_anchor_name diff --git a/vendor/github.com/shurcooL/sanitized_anchor_name/main.go b/vendor/github.com/shurcooL/sanitized_anchor_name/main.go new file mode 100644 index 0000000..6a77d12 --- /dev/null +++ b/vendor/github.com/shurcooL/sanitized_anchor_name/main.go @@ -0,0 +1,29 @@ +// Package sanitized_anchor_name provides a func to create sanitized anchor names. +// +// Its logic can be reused by multiple packages to create interoperable anchor names +// and links to those anchors. +// +// At this time, it does not try to ensure that generated anchor names +// are unique, that responsibility falls on the caller. +package sanitized_anchor_name // import "github.com/shurcooL/sanitized_anchor_name" + +import "unicode" + +// Create returns a sanitized anchor name for the given text. +func Create(text string) string { + var anchorName []rune + var futureDash = false + for _, r := range text { + switch { + case unicode.IsLetter(r) || unicode.IsNumber(r): + if futureDash && len(anchorName) > 0 { + anchorName = append(anchorName, '-') + } + futureDash = false + anchorName = append(anchorName, unicode.ToLower(r)) + default: + futureDash = true + } + } + return string(anchorName) +} diff --git a/vendor/github.com/sourcegraph/annotate/.travis.yml b/vendor/github.com/sourcegraph/annotate/.travis.yml new file mode 100644 index 0000000..036daec --- /dev/null +++ b/vendor/github.com/sourcegraph/annotate/.travis.yml @@ -0,0 +1,4 @@ +language: go + +go: + - 1.2 diff --git a/vendor/github.com/sourcegraph/annotate/LICENSE b/vendor/github.com/sourcegraph/annotate/LICENSE new file mode 100644 index 0000000..d8cf637 --- /dev/null +++ b/vendor/github.com/sourcegraph/annotate/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2013, Sourcegraph, Inc. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. Redistributions +in binary form must reproduce the above copyright notice, this list of +conditions and the following disclaimer in the documentation and/or +other materials provided with the distribution. + +Neither the name of Sourcegraph nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/github.com/sourcegraph/annotate/Makefile b/vendor/github.com/sourcegraph/annotate/Makefile new file mode 100644 index 0000000..016aed1 --- /dev/null +++ b/vendor/github.com/sourcegraph/annotate/Makefile @@ -0,0 +1,9 @@ +.PHONY: benchmark.txt + +benchmark.txt: + go test -test.run='^$$' -test.bench='.*' -test.benchmem > $@ 2>&1 + cat $@ + +profile: + go test -test.run='^$$' -test.bench='.*' -test.cpuprofile=/tmp/annotate.prof + go tool pprof ./annotate.test /tmp/annotate.prof diff --git a/vendor/github.com/sourcegraph/annotate/README.md b/vendor/github.com/sourcegraph/annotate/README.md new file mode 100644 index 0000000..7fb2759 --- /dev/null +++ b/vendor/github.com/sourcegraph/annotate/README.md @@ -0,0 +1,10 @@ +# annotate + +A Go package for applying multiple sets of annotations to a region of text. + +**[Documentation on Sourcegraph](https://sourcegraph.com/github.com/sourcegraph/annotate)** + +[![Build Status](https://travis-ci.org/sourcegraph/annotate.png?branch=master)](https://travis-ci.org/sourcegraph/annotate) +[![status](https://sourcegraph.com/api/repos/github.com/sourcegraph/annotate/badges/status.png)](https://sourcegraph.com/github.com/sourcegraph/annotate) +[![authors](https://sourcegraph.com/api/repos/github.com/sourcegraph/annotate/badges/authors.png)](https://sourcegraph.com/github.com/sourcegraph/annotate) +[![Total views](https://sourcegraph.com/api/repos/github.com/sourcegraph/annotate/counters/views.png)](https://sourcegraph.com/github.com/sourcegraph/annotate) diff --git a/vendor/github.com/sourcegraph/annotate/annotate.go b/vendor/github.com/sourcegraph/annotate/annotate.go new file mode 100644 index 0000000..627dfe9 --- /dev/null +++ b/vendor/github.com/sourcegraph/annotate/annotate.go @@ -0,0 +1,159 @@ +package annotate + +import ( + "bytes" + "errors" + "io" +) + +type Annotation struct { + // Start and End byte offsets (not rune offsets). 
+ Start, End int + + Left, Right []byte + WantInner int +} + +type Annotations []*Annotation + +func (a Annotations) Len() int { return len(a) } +func (a Annotations) Less(i, j int) bool { + // Sort by start position, breaking ties by preferring longer + // matches. + ai, aj := a[i], a[j] + if ai.Start == aj.Start { + if ai.End == aj.End { + return ai.WantInner < aj.WantInner + } + return ai.End > aj.End + } else { + return ai.Start < aj.Start + } +} +func (a Annotations) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// Annotates src with annotations in anns. +// +// Annotating an empty byte array always returns an empty byte array. +// +// Assumes anns is sorted (using sort.Sort(anns)). +func Annotate(src []byte, anns Annotations, writeContent func(io.Writer, []byte)) ([]byte, error) { + out := bytes.NewBuffer(make([]byte, 0, len(src)+20*len(anns))) + var err error + + // Keep a stack of open annotations (i.e., that have been opened and not yet + // closed). + var open []*Annotation + + for b := range src { + // Open annotations that begin here. + for i, a := range anns { + if a.Start < 0 || a.Start == b { + if a.Start < 0 { + err = ErrStartOutOfBounds + } + + out.Write(a.Left) + + if a.Start == a.End { + out.Write(a.Right) + } else { + // Put this annotation on the stack of annotations that will need + // to be closed. We remove it from anns at the end of the loop + // (to avoid modifying anns while we're iterating over it). + open = append(open, a) + } + } else if a.Start > b { + // Remove all annotations that we opened (we already put them on the + // stack of annotations that will need to be closed). + anns = anns[i:] + break + } + } + + if writeContent == nil { + out.Write(src[b : b+1]) + } else { + writeContent(out, src[b:b+1]) + } + + // Close annotations that end after this byte, handling overlapping + // elements as described below. Elements of open are ordered by their + // annotation start position. + + // We need to close all annotatations ending after this byte, as well as + // annotations that overlap this annotation's end and should reopen + // after it closes. + var toClose []*Annotation + + // Find annotations ending after this byte. + minStart := 0 // start of the leftmost annotation closing here + for i := len(open) - 1; i >= 0; i-- { + a := open[i] + if a.End == b+1 { + toClose = append(toClose, a) + if minStart == 0 || a.Start < minStart { + minStart = a.Start + } + open = append(open[:i], open[i+1:]...) + } + } + + // Find annotations that overlap annotations closing after this and + // that should reopen after it closes. + if toClose != nil { + for i := len(open) - 1; i >= 0; i-- { + if a := open[i]; a.Start > minStart { + out.Write(a.Right) + } + } + } + + for _, a := range toClose { + out.Write(a.Right) + } + + if toClose != nil { + for _, a := range open { + if a.Start > minStart { + out.Write(a.Left) + } + } + } + } + + if len(open) > 0 { + if err == ErrStartOutOfBounds { + err = ErrStartAndEndOutOfBounds + } else { + err = ErrEndOutOfBounds + } + + // Clean up by closing unclosed annotations, in the order they would + // have been closed in. 
+ for i := len(open) - 1; i >= 0; i-- { + a := open[i] + out.Write(a.Right) + } + } + + return out.Bytes(), err +} + +var ( + ErrStartOutOfBounds = errors.New("annotation start out of bounds") + ErrEndOutOfBounds = errors.New("annotation end out of bounds") + ErrStartAndEndOutOfBounds = errors.New("annotations start and end out of bounds") +) + +func IsOutOfBounds(err error) bool { + return err == ErrStartOutOfBounds || err == ErrEndOutOfBounds || err == ErrStartAndEndOutOfBounds +} + +func annLefts(as []*Annotation) []string { + ls := make([]string, len(as)) + for i, a := range as { + ls[i] = string(a.Left) + } + return ls +} diff --git a/vendor/github.com/sourcegraph/annotate/benchmark.txt b/vendor/github.com/sourcegraph/annotate/benchmark.txt new file mode 100644 index 0000000..44c5ba0 --- /dev/null +++ b/vendor/github.com/sourcegraph/annotate/benchmark.txt @@ -0,0 +1,3 @@ +PASS +BenchmarkAnnotate-12 5000 648826 ns/op 82921 B/op 979 allocs/op +ok github.com/sourcegraph/annotate 3.316s diff --git a/vendor/github.com/sourcegraph/syntaxhighlight/.travis.yml b/vendor/github.com/sourcegraph/syntaxhighlight/.travis.yml new file mode 100644 index 0000000..7cc29ff --- /dev/null +++ b/vendor/github.com/sourcegraph/syntaxhighlight/.travis.yml @@ -0,0 +1,14 @@ +language: go +go_import_path: github.com/sourcegraph/syntaxhighlight +go: + - 1.6.x + - 1.7.x + - 1.8.x + - master +install: + - go get -t -v ./... +script: + - diff -u <(echo -n) <(gofmt -d ./) + - go tool vet -composites=false ./*.go + - go tool vet -composites=false ./cmd/ + - go test -v -race -bench=. -benchmem ./... diff --git a/vendor/github.com/sourcegraph/syntaxhighlight/LICENSE b/vendor/github.com/sourcegraph/syntaxhighlight/LICENSE new file mode 100644 index 0000000..d8cf637 --- /dev/null +++ b/vendor/github.com/sourcegraph/syntaxhighlight/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2013, Sourcegraph, Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. Redistributions +in binary form must reproduce the above copyright notice, this list of +conditions and the following disclaimer in the documentation and/or +other materials provided with the distribution. + +Neither the name of Sourcegraph nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file
diff --git a/vendor/github.com/sourcegraph/syntaxhighlight/Makefile b/vendor/github.com/sourcegraph/syntaxhighlight/Makefile
new file mode 100644
index 0000000..63d8ecf
--- /dev/null
+++ b/vendor/github.com/sourcegraph/syntaxhighlight/Makefile
@@ -0,0 +1,5 @@
+.PHONY: benchmark.txt
+
+benchmark.txt:
+	go test -test.run='^$$' -test.bench='.*' -test.benchmem > $@ 2>&1
+	cat $@
diff --git a/vendor/github.com/sourcegraph/syntaxhighlight/README.md b/vendor/github.com/sourcegraph/syntaxhighlight/README.md
new file mode 100644
index 0000000..7bfad10
--- /dev/null
+++ b/vendor/github.com/sourcegraph/syntaxhighlight/README.md
@@ -0,0 +1,68 @@
+# syntaxhighlight
+
+Package syntaxhighlight provides syntax highlighting for code. It currently uses a language-independent lexer and performs decently on JavaScript, Java, Ruby, Python, Go, and C.
+
+The main [`AsHTML(src []byte) ([]byte, error)`](https://sourcegraph.com/sourcegraph.com/sourcegraph/syntaxhighlight@master/.GoPackage/sourcegraph.com/sourcegraph/syntaxhighlight/.def/AsHTML) function outputs HTML that uses the same CSS classes as [google-code-prettify](https://code.google.com/p/google-code-prettify/), so any stylesheets for that should also work with this package.
+
+**[Documentation on Sourcegraph](https://sourcegraph.com/github.com/sourcegraph/syntaxhighlight)**
+
+[![Build Status](https://travis-ci.org/sourcegraph/syntaxhighlight.png?branch=master)](https://travis-ci.org/sourcegraph/syntaxhighlight)
+[![status](https://sourcegraph.com/api/repos/github.com/sourcegraph/syntaxhighlight/badges/status.png)](https://sourcegraph.com/github.com/sourcegraph/syntaxhighlight)
+
+## Installation
+
+```
+go get -u github.com/sourcegraph/syntaxhighlight
+```
+First, install the Go environment; you can download it [here](https://golang.org/dl) or follow the [getting started guide](https://golang.org/doc/install).
+
+Remember to set the `GOPATH` and `PATH` environment variables correctly.
+
+## Example usage
+
+The function [`AsHTML(src []byte, options ...Option) ([]byte, error)`](https://sourcegraph.com/sourcegraph.com/sourcegraph/syntaxhighlight@master/.GoPackage/sourcegraph.com/sourcegraph/syntaxhighlight/.def/AsHTML) returns an HTML-highlighted version of `src`. The input source code can be in any language; the lexer is language independent. An `OrderedList()` option can be passed to produce an `<ol>...</ol>`-wrapped list to display line numbers.
+
+```go
+package syntaxhighlight_test
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/sourcegraph/syntaxhighlight"
+)
+
+func Example() {
+	src := []byte(`
+/* hello, world! */
+var a = 3;
+
+// b is a cool function
+function b() {
+	return 7;
+}`)
+
+	highlighted, err := syntaxhighlight.AsHTML(src)
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+
+	fmt.Println(string(highlighted))
+
+	// Output:
+	// /* hello, world! */
+	// var a = 3;
+	//
+	// // b is a cool function
+	// function b() {
+	//	return 7;
+	// }
+}
+```
+
+## Contributors
+
+* [Quinn Slack](https://sourcegraph.com/sqs)
+
+Contributions are welcome! Submit a pull request on GitHub.
diff --git a/vendor/github.com/sourcegraph/syntaxhighlight/benchmark.txt b/vendor/github.com/sourcegraph/syntaxhighlight/benchmark.txt
new file mode 100644
index 0000000..2923a0f
--- /dev/null
+++ b/vendor/github.com/sourcegraph/syntaxhighlight/benchmark.txt
@@ -0,0 +1,3 @@
+PASS
+BenchmarkAnnotate-12	20000	80528 ns/op	18192 B/op	428 allocs/op
+ok	github.com/sourcegraph/syntaxhighlight	2.424s
diff --git a/vendor/github.com/sourcegraph/syntaxhighlight/highlight.go b/vendor/github.com/sourcegraph/syntaxhighlight/highlight.go
new file mode 100644
index 0000000..9ff794c
--- /dev/null
+++ b/vendor/github.com/sourcegraph/syntaxhighlight/highlight.go
@@ -0,0 +1,295 @@
+// Package syntaxhighlight provides syntax highlighting for code. It currently
+// uses a language-independent lexer and performs decently on JavaScript, Java,
+// Ruby, Python, Go, and C.
+package syntaxhighlight
+
+import (
+	"bytes"
+	"io"
+	"strings"
+	"text/scanner"
+	"text/template"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/sourcegraph/annotate"
+)
+
+// Kind represents a syntax highlighting kind (class) which will be assigned to tokens.
+// A syntax highlighting scheme (style) maps text style properties to each token kind.
+type Kind uint8
+
+// A set of supported highlighting kinds
+const (
+	Whitespace Kind = iota
+	String
+	Keyword
+	Comment
+	Type
+	Literal
+	Punctuation
+	Plaintext
+	Tag
+	HTMLTag
+	HTMLAttrName
+	HTMLAttrValue
+	Decimal
+)
+
+//go:generate gostringer -type=Kind
+
+// Printer implements an interface to render highlighted output
+// (see HTMLPrinter for the implementation of this interface)
+type Printer interface {
+	Print(w io.Writer, kind Kind, tokText string) error
+}
+
+// HTMLConfig holds the HTML class configuration to be used by annotators when
+// highlighting code.
+type HTMLConfig struct {
+	String        string
+	Keyword       string
+	Comment       string
+	Type          string
+	Literal       string
+	Punctuation   string
+	Plaintext     string
+	Tag           string
+	HTMLTag       string
+	HTMLAttrName  string
+	HTMLAttrValue string
+	Decimal       string
+	Whitespace    string
+
+	AsOrderedList bool
+}
+
+// HTMLPrinter implements the Printer interface and is used to produce an
+// HTML-based highlighter.
+type HTMLPrinter HTMLConfig
+
+// Class returns the set class for a given token Kind.
+func (c HTMLConfig) Class(kind Kind) string {
+	switch kind {
+	case String:
+		return c.String
+	case Keyword:
+		return c.Keyword
+	case Comment:
+		return c.Comment
+	case Type:
+		return c.Type
+	case Literal:
+		return c.Literal
+	case Punctuation:
+		return c.Punctuation
+	case Plaintext:
+		return c.Plaintext
+	case Tag:
+		return c.Tag
+	case HTMLTag:
+		return c.HTMLTag
+	case HTMLAttrName:
+		return c.HTMLAttrName
+	case HTMLAttrValue:
+		return c.HTMLAttrValue
+	case Decimal:
+		return c.Decimal
+	}
+	return ""
+}
+
+// Print is the function that emits highlighted source code using
+// <span class="...">...</span> wrapper tags
+func (p HTMLPrinter) Print(w io.Writer, kind Kind, tokText string) error {
+	if p.AsOrderedList {
+		if i := strings.Index(tokText, "\n"); i > -1 {
+			if err := p.Print(w, kind, tokText[:i]); err != nil {
+				return err
+			}
+			w.Write([]byte("</li>\n<li>"))
+			if err := p.Print(w, kind, tokText[i+1:]); err != nil {
+				return err
+			}
+			return nil
+		}
+	}
+
+	class := ((HTMLConfig)(p)).Class(kind)
+	if class != "" {
+		_, err := w.Write([]byte(`<span class="` + class + `">`))
+		if err != nil {
+			return err
+		}
+	}
+	template.HTMLEscape(w, []byte(tokText))
+	if class != "" {
+		_, err := w.Write([]byte(`</span>`))
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+type Annotator interface {
+	Annotate(start int, kind Kind, tokText string) (*annotate.Annotation, error)
+}
+
+type HTMLAnnotator HTMLConfig
+
+func (a HTMLAnnotator) Annotate(start int, kind Kind, tokText string) (*annotate.Annotation, error) {
+	class := ((HTMLConfig)(a)).Class(kind)
+	if class != "" {
+		left := []byte(`<span class="`)
+		left = append(left, []byte(class)...)
+		left = append(left, []byte(`">`)...)
+		return &annotate.Annotation{
+			Start: start, End: start + len(tokText),
+			Left: left, Right: []byte(`</span>`),
+		}, nil
+	}
+	return nil, nil
+}
+
+// Option is a type of the function that can modify
+// one or more of the options in the HTMLConfig structure.
+type Option func(options *HTMLConfig)
+
+// OrderedList allows you to format the output as an ordered list
+// to have line numbers in the output.
+//
+// Example:
+// AsHTML(input, OrderedList())
+func OrderedList() Option {
+	return func(o *HTMLConfig) {
+		o.AsOrderedList = true
+	}
+}
+
+// DefaultHTMLConfig provides class names that match those of google-code-prettify
+// (https://code.google.com/p/google-code-prettify/).
+var DefaultHTMLConfig = HTMLConfig{
+	String:        "str",
+	Keyword:       "kwd",
+	Comment:       "com",
+	Type:          "typ",
+	Literal:       "lit",
+	Punctuation:   "pun",
+	Plaintext:     "pln",
+	Tag:           "tag",
+	HTMLTag:       "htm",
+	HTMLAttrName:  "atn",
+	HTMLAttrValue: "atv",
+	Decimal:       "dec",
+	Whitespace:    "",
+}
+
+func Print(s *scanner.Scanner, w io.Writer, p Printer) error {
+	tok := s.Scan()
+	for tok != scanner.EOF {
+		tokText := s.TokenText()
+		err := p.Print(w, tokenKind(tok, tokText), tokText)
+		if err != nil {
+			return err
+		}
+
+		tok = s.Scan()
+	}
+
+	return nil
+}
+
+func Annotate(src []byte, a Annotator) (annotate.Annotations, error) {
+	s := NewScanner(src)
+
+	var anns annotate.Annotations
+	read := 0
+
+	tok := s.Scan()
+	for tok != scanner.EOF {
+		tokText := s.TokenText()
+
+		ann, err := a.Annotate(read, tokenKind(tok, tokText), tokText)
+		if err != nil {
+			return nil, err
+		}
+		read += len(tokText)
+		if ann != nil {
+			anns = append(anns, ann)
+		}
+
+		tok = s.Scan()
+	}
+
+	return anns, nil
+}
+
+// AsHTML converts source code into an HTML-highlighted version;
+// It accepts optional configuration parameters to control rendering
+// (see OrderedList as one example)
+func AsHTML(src []byte, options ...Option) ([]byte, error) {
+	opt := DefaultHTMLConfig
+	for _, f := range options {
+		f(&opt)
+	}
+
+	var buf bytes.Buffer
+	if opt.AsOrderedList {
+		buf.Write([]byte("<ol>\n<li>"))
+	}
+	err := Print(NewScanner(src), &buf, HTMLPrinter(opt))
+	if opt.AsOrderedList {
+		buf.Write([]byte("</li>\n</ol>"))
+	}
+	if err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+// NewScanner is a helper that takes a []byte src, wraps it in a reader and creates a Scanner.
+func NewScanner(src []byte) *scanner.Scanner {
+	return NewScannerReader(bytes.NewReader(src))
+}
+
+// NewScannerReader takes a reader src and creates a Scanner.
+func NewScannerReader(src io.Reader) *scanner.Scanner {
+	var s scanner.Scanner
+	s.Init(src)
+	s.Error = func(_ *scanner.Scanner, _ string) {}
+	s.Whitespace = 0
+	s.Mode = s.Mode ^ scanner.SkipComments
+	return &s
+}
+
+func tokenKind(tok rune, tokText string) Kind {
+	switch tok {
+	case scanner.Ident:
+		if _, isKW := keywords[tokText]; isKW {
+			return Keyword
+		}
+		if r, _ := utf8.DecodeRuneInString(tokText); unicode.IsUpper(r) {
+			return Type
+		}
+		return Plaintext
+	case scanner.Float, scanner.Int:
+		return Decimal
+	case scanner.Char, scanner.String, scanner.RawString:
+		return String
+	case scanner.Comment:
+		return Comment
+	}
+	if unicode.IsSpace(tok) {
+		return Whitespace
+	}
+	return Punctuation
+}
diff --git a/vendor/github.com/sourcegraph/syntaxhighlight/keywords.go b/vendor/github.com/sourcegraph/syntaxhighlight/keywords.go
new file mode 100644
index 0000000..85729d6
--- /dev/null
+++ b/vendor/github.com/sourcegraph/syntaxhighlight/keywords.go
@@ -0,0 +1,177 @@
+package syntaxhighlight
+
+var keywords = map[string]struct{}{
+	"BEGIN": {},
+	"END": {},
+	"False": {},
+	"Infinity": {},
+	"NaN": {},
+	"None": {},
+	"True": {},
+	"abstract": {},
+	"alias": {},
+	"align_union": {},
+	"alignof": {},
+	"and": {},
+	"append": {},
+	"as": {},
+	"asm": {},
+	"assert": {},
+	"auto": {},
+	"axiom": {},
+	"begin": {},
+	"bool": {},
+	"boolean": {},
+	"break": {},
+	"byte": {},
+	"caller": {},
+	"case": {},
+	"catch": {},
+	"char": {},
+	"class": {},
+	"concept": {},
+	"concept_map": {},
+	"const": {},
+	"const_cast": {},
+	"constexpr": {},
+	"continue": {},
+	"debugger": {},
+	"decltype": {},
+	"def": {},
+	"default": {},
+	"defined": {},
+	"del": {},
+	"delegate": {},
+	"delete": {},
+	"die": {},
+	"do": {},
+	"double": {},
+	"dump": {},
+	"dynamic_cast": {},
+	"elif": {},
+	"else": {},
+	"elsif": {},
+	"end": {},
+	"ensure": {},
+	"enum": {},
+	"eval": {},
+	"except": {},
+	"exec": {},
+	"exit": {},
+	"explicit": {},
+	"export": {},
+	"extends": {},
+	"extern": {},
+	"false": {},
+	"final": {},
+	"finally": {},
+	"float": {},
+	"float32": {},
+	"float64": {},
+	"for": {},
+	"foreach": {},
+	"friend": {},
+	"from": {},
+	"func": {},
+	"function": {},
+	"generic": {},
+	"get": {},
+	"global": {},
+	"goto": {},
+	"if": {},
+	"implements": {},
+	"import": {},
+	"in": {},
+	"inline": {},
+	"instanceof": {},
+	"int": {},
+	"int8": {},
+	"int16": {},
+	"int32": {},
+	"int64": {},
+	"interface": {},
+	"is": {},
+	"lambda": {},
+	"last": {},
+	"late_check": {},
+	"local": {},
+	"long": {},
+	"make": {},
+	"map": {},
+	"module": {},
+	"mutable": {},
+	"my": {},
+	"namespace": {},
+	"native": {},
+	"new": {},
+	"next": {},
+	"nil": {},
+	"no": {},
+	"nonlocal": {},
+	"not": {},
+	"null": {},
+	"nullptr": {},
+	"operator": {},
+	"or": {},
+	"our": {},
+	"package": {},
+	"pass": {},
+	"print": {},
+	"private": {},
+	"property": {},
+	"protected": {},
+	"public": {},
+	"raise": {},
+	"redo": {},
+	"register": {},
+	"reinterpret_cast": {},
+	"require": {},
+	"rescue": {},
+	"retry": {},
+	"return": {},
+	"self": {},
+	"set": {},
+	"short": {},
+	"signed": {},
+	"sizeof": {},
+	"static": {},
+	"static_assert": {},
+	"static_cast": {},
+	"strictfp": {},
+	"struct": 
{}, + "sub": {}, + "super": {}, + "switch": {}, + "synchronized": {}, + "template": {}, + "then": {}, + "this": {}, + "throw": {}, + "throws": {}, + "transient": {}, + "true": {}, + "try": {}, + "type": {}, + "typedef": {}, + "typeid": {}, + "typename": {}, + "typeof": {}, + "undef": {}, + "undefined": {}, + "union": {}, + "unless": {}, + "unsigned": {}, + "until": {}, + "use": {}, + "using": {}, + "var": {}, + "virtual": {}, + "void": {}, + "volatile": {}, + "wantarray": {}, + "when": {}, + "where": {}, + "while": {}, + "with": {}, + "yield": {}, +} diff --git a/vendor/github.com/sourcegraph/syntaxhighlight/kind_gostring.go b/vendor/github.com/sourcegraph/syntaxhighlight/kind_gostring.go new file mode 100644 index 0000000..2fc73ec --- /dev/null +++ b/vendor/github.com/sourcegraph/syntaxhighlight/kind_gostring.go @@ -0,0 +1,16 @@ +// generated by gostringer -type=Kind; DO NOT EDIT + +package syntaxhighlight + +import "fmt" + +const _Kind_name = "WhitespaceStringKeywordCommentTypeLiteralPunctuationPlaintextTagHTMLTagHTMLAttrNameHTMLAttrValueDecimal" + +var _Kind_index = [...]uint8{0, 10, 16, 23, 30, 34, 41, 52, 61, 64, 71, 83, 96, 103} + +func (i Kind) GoString() string { + if i+1 >= Kind(len(_Kind_index)) { + return fmt.Sprintf("syntaxhighlight.Kind(%d)", i) + } + return "syntaxhighlight." + _Kind_name[_Kind_index[i]:_Kind_index[i+1]] +} diff --git a/vendor/github.com/sqlitebrowser/blackfriday/.gitignore b/vendor/github.com/sqlitebrowser/blackfriday/.gitignore new file mode 100644 index 0000000..75623dc --- /dev/null +++ b/vendor/github.com/sqlitebrowser/blackfriday/.gitignore @@ -0,0 +1,8 @@ +*.out +*.swp +*.8 +*.6 +_obj +_test* +markdown +tags diff --git a/vendor/github.com/sqlitebrowser/blackfriday/.travis.yml b/vendor/github.com/sqlitebrowser/blackfriday/.travis.yml new file mode 100644 index 0000000..a1687f1 --- /dev/null +++ b/vendor/github.com/sqlitebrowser/blackfriday/.travis.yml @@ -0,0 +1,30 @@ +sudo: false +language: go +go: + - 1.5.4 + - 1.6.2 + - tip +matrix: + include: + - go: 1.2.2 + script: + - go get -t -v ./... + - go test -v -race ./... + - go: 1.3.3 + script: + - go get -t -v ./... + - go test -v -race ./... + - go: 1.4.3 + script: + - go get -t -v ./... + - go test -v -race ./... + allow_failures: + - go: tip + fast_finish: true +install: + - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d -s .) + - go tool vet . + - go test -v -race ./... diff --git a/vendor/github.com/sqlitebrowser/blackfriday/LICENSE.txt b/vendor/github.com/sqlitebrowser/blackfriday/LICENSE.txt new file mode 100644 index 0000000..2885af3 --- /dev/null +++ b/vendor/github.com/sqlitebrowser/blackfriday/LICENSE.txt @@ -0,0 +1,29 @@ +Blackfriday is distributed under the Simplified BSD License: + +> Copyright © 2011 Russ Ross +> All rights reserved. +> +> Redistribution and use in source and binary forms, with or without +> modification, are permitted provided that the following conditions +> are met: +> +> 1. Redistributions of source code must retain the above copyright +> notice, this list of conditions and the following disclaimer. +> +> 2. Redistributions in binary form must reproduce the above +> copyright notice, this list of conditions and the following +> disclaimer in the documentation and/or other materials provided with +> the distribution. 
+> +> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +> POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/sqlitebrowser/blackfriday/README.md b/vendor/github.com/sqlitebrowser/blackfriday/README.md new file mode 100644 index 0000000..0d1ac9a --- /dev/null +++ b/vendor/github.com/sqlitebrowser/blackfriday/README.md @@ -0,0 +1,292 @@ +Blackfriday [![Build Status](https://travis-ci.org/russross/blackfriday.svg?branch=master)](https://travis-ci.org/russross/blackfriday) [![GoDoc](https://godoc.org/github.com/russross/blackfriday?status.svg)](https://godoc.org/github.com/russross/blackfriday) +=========== + +Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It +is paranoid about its input (so you can safely feed it user-supplied +data), it is fast, it supports common extensions (tables, smart +punctuation substitutions, etc.), and it is safe for all utf-8 +(unicode) input. + +HTML output is currently supported, along with Smartypants +extensions. An experimental LaTeX output engine is also included. + +It started as a translation from C of [Sundown][3]. + + +Installation +------------ + +Blackfriday is compatible with Go 1. If you are using an older +release of Go, consider using v1.1 of blackfriday, which was based +on the last stable release of Go prior to Go 1. You can find it as a +tagged commit on github. + +With Go 1 and git installed: + + go get github.com/russross/blackfriday + +will download, compile, and install the package into your `$GOPATH` +directory hierarchy. Alternatively, you can achieve the same if you +import it into a project: + + import "github.com/russross/blackfriday" + +and `go get` without parameters. + +Usage +----- + +For basic usage, it is as simple as getting your input into a byte +slice and calling: + + output := blackfriday.MarkdownBasic(input) + +This renders it with no extensions enabled. To get a more useful +feature set, use this instead: + + output := blackfriday.MarkdownCommon(input) + +### Sanitize untrusted content + +Blackfriday itself does nothing to protect against malicious content. If you are +dealing with user-supplied markdown, we recommend running blackfriday's output +through HTML sanitizer such as +[Bluemonday](https://github.com/microcosm-cc/bluemonday). + +Here's an example of simple usage of blackfriday together with bluemonday: + +``` go +import ( + "github.com/microcosm-cc/bluemonday" + "github.com/russross/blackfriday" +) + +// ... +unsafe := blackfriday.MarkdownCommon(input) +html := bluemonday.UGCPolicy().SanitizeBytes(unsafe) +``` + +### Custom options + +If you want to customize the set of options, first get a renderer +(currently either the HTML or LaTeX output engines), then use it to +call the more general `Markdown` function. 
For examples, see the
+implementations of `MarkdownBasic` and `MarkdownCommon` in
+`markdown.go`, or the short sketch after the feature list below.
+
+You can also check out `blackfriday-tool` for a more complete example
+of how to use it. Download and install it using:
+
+    go get github.com/russross/blackfriday-tool
+
+This is a simple command-line tool that allows you to process a
+markdown file using a standalone program. You can also browse the
+source directly on github if you are just looking for some example
+code:
+
+* <https://github.com/russross/blackfriday-tool>
+
+Note that if you have not already done so, installing
+`blackfriday-tool` will be sufficient to download and install
+blackfriday in addition to the tool itself. The tool binary will be
+installed in `$GOPATH/bin`. This is a statically-linked binary that
+can be copied to wherever you need it without worrying about
+dependencies and library versions.
+
+### Sanitized anchor names
+
+Blackfriday includes an algorithm for creating sanitized anchor names
+corresponding to a given input text. This algorithm is used to create
+anchors for headings when `EXTENSION_AUTO_HEADER_IDS` is enabled. The
+algorithm has a specification, so that other packages can create
+compatible anchor names and links to those anchors.
+
+The specification is located at https://godoc.org/github.com/russross/blackfriday#hdr-Sanitized_Anchor_Names.
+
+[`SanitizedAnchorName`](https://godoc.org/github.com/russross/blackfriday#SanitizedAnchorName) exposes this functionality, and can be used to
+create compatible links to the anchor names generated by blackfriday.
+This algorithm is also implemented in a small standalone package at
+[`github.com/shurcooL/sanitized_anchor_name`](https://godoc.org/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients
+that want a small package and don't need full functionality of blackfriday.
+
+
+Features
+--------
+
+All features of Sundown are supported, including:
+
+* **Compatibility**. The Markdown v1.0.3 test suite passes with
+  the `--tidy` option. Without `--tidy`, the differences are
+  mostly in whitespace and entity escaping, where blackfriday is
+  more consistent and cleaner.
+
+* **Common extensions**, including table support, fenced code
+  blocks, autolinks, strikethroughs, non-strict emphasis, etc.
+
+* **Safety**. Blackfriday is paranoid when parsing, making it safe
+  to feed untrusted user input without fear of bad things
+  happening. The test suite stress tests this and there are no
+  known inputs that make it crash. If you find one, please let me
+  know and send me the input that does it.
+
+  NOTE: "safety" in this context means *runtime safety only*. In order to
+  protect yourself against JavaScript injection in untrusted content, see
+  [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content).
+
+* **Fast processing**. It is fast enough to render on-demand in
+  most web applications without having to cache the output.
+
+* **Thread safety**. You can run multiple parsers in different
+  goroutines without ill effect. There is no dependence on global
+  shared state.
+
+* **Minimal dependencies**. Blackfriday only depends on standard
+  library packages in Go. The source code is pretty
+  self-contained, so it is easy to add to any project, including
+  Google App Engine projects.
+
+* **Standards compliant**. Output successfully validates using the
+  W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional.
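+
+As a minimal sketch of the custom-options path described above,
+assuming the v1 API vendored here (the specific flags picked below are
+arbitrary examples, not a recommendation), a renderer and extension set
+are wired up like this:
+
+``` go
+// Build an HTML renderer with a chosen set of output flags.
+htmlFlags := blackfriday.HTML_USE_SMARTYPANTS
+renderer := blackfriday.HtmlRenderer(htmlFlags, "", "")
+
+// Pick the parser extensions to enable.
+extensions := blackfriday.EXTENSION_TABLES |
+	blackfriday.EXTENSION_FENCED_CODE |
+	blackfriday.EXTENSION_AUTO_HEADER_IDS
+
+// input is assumed to be a []byte holding the markdown source.
+output := blackfriday.Markdown(input, renderer, extensions)
+```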
+
+
+Extensions
+----------
+
+In addition to the standard markdown syntax, this package
+implements the following extensions:
+
+* **Intra-word emphasis suppression**. The `_` character is
+  commonly used inside words when discussing code, so having
+  markdown interpret it as an emphasis command is usually the
+  wrong thing. Blackfriday lets you treat all emphasis markers as
+  normal characters when they occur inside a word.
+
+* **Tables**. Tables can be created by drawing them in the input
+  using a simple syntax:
+
+  ```
+  Name    | Age
+  --------|------
+  Bob     | 27
+  Alice   | 23
+  ```
+
+* **Fenced code blocks**. In addition to the normal 4-space
+  indentation to mark code blocks, you can explicitly mark them
+  and supply a language (to make syntax highlighting simple). Just
+  mark it like this:
+
+  ``` go
+  func getTrue() bool {
+      return true
+  }
+  ```
+
+  You can use 3 or more backticks to mark the beginning of the
+  block, and the same number to mark the end of the block.
+
+  To preserve classes of fenced code blocks while using the bluemonday
+  HTML sanitizer, use the following policy:
+
+  ``` go
+  p := bluemonday.UGCPolicy()
+  p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code")
+  html := p.SanitizeBytes(unsafe)
+  ```
+
+* **Definition lists**. A simple definition list is made of a single-line
+  term followed by a colon and the definition for that term.
+
+      Cat
+      : Fluffy animal everyone likes
+
+      Internet
+      : Vector of transmission for pictures of cats
+
+  Terms must be separated from the previous definition by a blank line.
+
+* **Footnotes**. A marker in the text that will become a superscript number;
+  a footnote definition that will be placed in a list of footnotes at the
+  end of the document. A footnote looks like this:
+
+      This is a footnote.[^1]
+
+      [^1]: the footnote text.
+
+* **Autolinking**. Blackfriday can find URLs that have not been
+  explicitly marked as links and turn them into links.
+
+* **Strikethrough**. Use two tildes (`~~`) to mark text that
+  should be crossed out.
+
+* **Hard line breaks**. With this extension enabled (it is off by
+  default in the `MarkdownBasic` and `MarkdownCommon` convenience
+  functions), newlines in the input translate into line breaks in
+  the output.
+
+* **Smart quotes**. Smartypants-style punctuation substitution is
+  supported, turning normal double- and single-quote marks into
+  curly quotes, etc.
+
+* **LaTeX-style dash parsing** is an additional option, where `--`
+  is translated into `&ndash;`, and `---` is translated into
+  `&mdash;`. This differs from most smartypants processors, which
+  turn a single hyphen into an ndash and a double hyphen into an
+  mdash.
+
+* **Smart fractions**, where anything that looks like a fraction
+  is translated into suitable HTML (instead of just a few special
+  cases like most smartypants processors). For example, `4/5`
+  becomes `<sup>4</sup>&frasl;<sub>5</sub>`, which renders as
+  <sup>4</sup>&frasl;<sub>5</sub>.
+
+
+Other renderers
+---------------
+
+Blackfriday is structured to allow alternative rendering engines. Here
+are a few of note:
+
+* [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown):
+  provides a GitHub Flavored Markdown renderer with fenced code block
+  highlighting, clickable header anchor links.
+
+  It's not customizable, and its goal is to produce HTML output
+  equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode),
+  except the rendering is performed locally.
+
+* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt,
+  but for markdown.
+
+* LaTeX output: renders output as LaTeX. This is currently part of the
+  main Blackfriday repository, but may be split into its own project
+  in the future. If you are interested in owning and maintaining the
+  LaTeX output component, please be in touch.
+
+  It renders some basic documents, but is only experimental at this
+  point. In particular, it does not do any inline escaping, so input
+  that happens to look like LaTeX code will be passed through without
+  modification.
+
+* [Md2Vim](https://github.com/FooSoft/md2vim): transforms markdown files into vimdoc format.
+
+
+Todo
+----
+
+* More unit testing
+* Improve unicode support. It does not understand all unicode
+  rules (about what constitutes a letter, a punctuation symbol,
+  etc.), so it may fail to detect word boundaries correctly in
+  some instances. It is safe on all utf-8 input.
+
+
+License
+-------
+
+[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt)
+
+
+   [1]: http://daringfireball.net/projects/markdown/ "Markdown"
+   [2]: http://golang.org/ "Go Language"
+   [3]: https://github.com/vmg/sundown "Sundown"
diff --git a/vendor/github.com/sqlitebrowser/blackfriday/block.go b/vendor/github.com/sqlitebrowser/blackfriday/block.go
new file mode 100644
index 0000000..7fc731d
--- /dev/null
+++ b/vendor/github.com/sqlitebrowser/blackfriday/block.go
@@ -0,0 +1,1450 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+// Functions to parse block-level elements.
+//
+
+package blackfriday
+
+import (
+	"bytes"
+	"unicode"
+)
+
+// Parse block-level data.
+// Note: this function and many that it calls assume that
+// the input buffer ends with a newline.
+func (p *parser) block(out *bytes.Buffer, data []byte) {
+	if len(data) == 0 || data[len(data)-1] != '\n' {
+		panic("block input is missing terminating newline")
+	}
+
+	// this is called recursively: enforce a maximum depth
+	if p.nesting >= p.maxNesting {
+		return
+	}
+	p.nesting++
+
+	// parse out one block-level construct at a time
+	for len(data) > 0 {
+		// prefixed header:
+		//
+		// # Header 1
+		// ## Header 2
+		// ...
+		// ###### Header 6
+		if p.isPrefixHeader(data) {
+			data = data[p.prefixHeader(out, data):]
+			continue
+		}
+
+		// block of preformatted HTML:
+		//
+		// <div>
+		//     ...
+		// </div>
    + if data[0] == '<' { + if i := p.html(out, data, true); i > 0 { + data = data[i:] + continue + } + } + + // title block + // + // % stuff + // % more stuff + // % even more stuff + if p.flags&EXTENSION_TITLEBLOCK != 0 { + if data[0] == '%' { + if i := p.titleBlock(out, data, true); i > 0 { + data = data[i:] + continue + } + } + } + + // blank lines. note: returns the # of bytes to skip + if i := p.isEmpty(data); i > 0 { + data = data[i:] + continue + } + + // indented code block: + // + // func max(a, b int) int { + // if a > b { + // return a + // } + // return b + // } + if p.codePrefix(data) > 0 { + data = data[p.code(out, data):] + continue + } + + // fenced code block: + // + // ``` go + // func fact(n int) int { + // if n <= 1 { + // return n + // } + // return n * fact(n-1) + // } + // ``` + if p.flags&EXTENSION_FENCED_CODE != 0 { + if i := p.fencedCodeBlock(out, data, true); i > 0 { + data = data[i:] + continue + } + } + + // horizontal rule: + // + // ------ + // or + // ****** + // or + // ______ + if p.isHRule(data) { + p.r.HRule(out) + var i int + for i = 0; data[i] != '\n'; i++ { + } + data = data[i:] + continue + } + + // block quote: + // + // > A big quote I found somewhere + // > on the web + if p.quotePrefix(data) > 0 { + data = data[p.quote(out, data):] + continue + } + + // table: + // + // Name | Age | Phone + // ------|-----|--------- + // Bob | 31 | 555-1234 + // Alice | 27 | 555-4321 + if p.flags&EXTENSION_TABLES != 0 { + if i := p.table(out, data); i > 0 { + data = data[i:] + continue + } + } + + // an itemized/unordered list: + // + // * Item 1 + // * Item 2 + // + // also works with + or - + if p.uliPrefix(data) > 0 { + data = data[p.list(out, data, 0):] + continue + } + + // a numbered/ordered list: + // + // 1. Item 1 + // 2. 
Item 2 + if p.oliPrefix(data) > 0 { + data = data[p.list(out, data, LIST_TYPE_ORDERED):] + continue + } + + // definition lists: + // + // Term 1 + // : Definition a + // : Definition b + // + // Term 2 + // : Definition c + if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if p.dliPrefix(data) > 0 { + data = data[p.list(out, data, LIST_TYPE_DEFINITION):] + continue + } + } + + // anything else must look like a normal paragraph + // note: this finds underlined headers, too + data = data[p.paragraph(out, data):] + } + + p.nesting-- +} + +func (p *parser) isPrefixHeader(data []byte) bool { + if data[0] != '#' { + return false + } + + if p.flags&EXTENSION_SPACE_HEADERS != 0 { + level := 0 + for level < 6 && data[level] == '#' { + level++ + } + if data[level] != ' ' { + return false + } + } + return true +} + +func (p *parser) prefixHeader(out *bytes.Buffer, data []byte) int { + level := 0 + for level < 6 && data[level] == '#' { + level++ + } + i := skipChar(data, level, ' ') + end := skipUntilChar(data, i, '\n') + skip := end + id := "" + if p.flags&EXTENSION_HEADER_IDS != 0 { + j, k := 0, 0 + // find start/end of header id + for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { + } + for k = j + 1; k < end && data[k] != '}'; k++ { + } + // extract header id iff found + if j < end && k < end { + id = string(data[j+2 : k]) + end = j + skip = k + 1 + for end > 0 && data[end-1] == ' ' { + end-- + } + } + } + for end > 0 && data[end-1] == '#' { + if isBackslashEscaped(data, end-1) { + break + } + end-- + } + for end > 0 && data[end-1] == ' ' { + end-- + } + if end > i { + if id == "" && p.flags&EXTENSION_AUTO_HEADER_IDS != 0 { + id = SanitizedAnchorName(string(data[i:end])) + } + work := func() bool { + p.inline(out, data[i:end]) + return true + } + p.r.Header(out, work, level, id) + } + return skip +} + +func (p *parser) isUnderlinedHeader(data []byte) int { + // test of level 1 header + if data[0] == '=' { + i := skipChar(data, 1, '=') + i = skipChar(data, i, ' ') + if data[i] == '\n' { + return 1 + } else { + return 0 + } + } + + // test of level 2 header + if data[0] == '-' { + i := skipChar(data, 1, '-') + i = skipChar(data, i, ' ') + if data[i] == '\n' { + return 2 + } else { + return 0 + } + } + + return 0 +} + +func (p *parser) titleBlock(out *bytes.Buffer, data []byte, doRender bool) int { + if data[0] != '%' { + return 0 + } + splitData := bytes.Split(data, []byte("\n")) + var i int + for idx, b := range splitData { + if !bytes.HasPrefix(b, []byte("%")) { + i = idx // - 1 + break + } + } + + data = bytes.Join(splitData[0:i], []byte("\n")) + p.r.TitleBlock(out, data) + + return len(data) +} + +func (p *parser) html(out *bytes.Buffer, data []byte, doRender bool) int { + var i, j int + + // identify the opening tag + if data[0] != '<' { + return 0 + } + curtag, tagfound := p.htmlFindTag(data[1:]) + + // handle special cases + if !tagfound { + // check for an HTML comment + if size := p.htmlComment(out, data, doRender); size > 0 { + return size + } + + // check for an
<hr> tag
+		if size := p.htmlHr(out, data, doRender); size > 0 {
+			return size
+		}
+
+		// check for HTML CDATA
+		if size := p.htmlCDATA(out, data, doRender); size > 0 {
+			return size
+		}
+
+		// no special case recognized
+		return 0
+	}
+
+	// look for an unindented matching closing tag
+	// followed by a blank line
+	found := false
+	/*
+		closetag := []byte("\n</" + curtag + ">")
+		j = len(curtag) + 1
+		for !found {
+			// scan for a closing tag at the beginning of a line
+			if skip := bytes.Index(data[j:], closetag); skip >= 0 {
+				j += skip + len(closetag)
+			} else {
+				break
+			}
+
+			// see if it is the only thing on the line
+			if skip := p.isEmpty(data[j:]); skip > 0 {
+				// see if it is followed by a blank line/eof
+				j += skip
+				if j >= len(data) {
+					found = true
+					i = j
+				} else {
+					if skip := p.isEmpty(data[j:]); skip > 0 {
+						j += skip
+						found = true
+						i = j
+					}
+				}
+			}
+		}
+	*/
+
+	// if not found, try a second pass looking for indented match
+	// but not if tag is "ins" or "del" (following original Markdown.pl)
+	if !found && curtag != "ins" && curtag != "del" {
+		i = 1
+		for i < len(data) {
+			i++
+			for i < len(data) && !(data[i-1] == '<' && data[i] == '/') {
+				i++
+			}
+
+			if i+2+len(curtag) >= len(data) {
+				break
+			}
+
+			j = p.htmlFindEnd(curtag, data[i-1:])
+
+			if j > 0 {
+				i += j - 1
+				found = true
+				break
+			}
+		}
+	}
+
+	if !found {
+		return 0
+	}
+
+	// the end of the block has been found
+	if doRender {
+		// trim newlines
+		end := i
+		for end > 0 && data[end-1] == '\n' {
+			end--
+		}
+		p.r.BlockHtml(out, data[:end])
+	}
+
+	return i
+}
+
+func (p *parser) renderHTMLBlock(out *bytes.Buffer, data []byte, start int, doRender bool) int {
+	// html block needs to end with a blank line
+	if i := p.isEmpty(data[start:]); i > 0 {
+		size := start + i
+		if doRender {
+			// trim trailing newlines
+			end := size
+			for end > 0 && data[end-1] == '\n' {
+				end--
+			}
+			p.r.BlockHtml(out, data[:end])
+		}
+		return size
+	}
+	return 0
+}
+
+// HTML comment, lax form
+func (p *parser) htmlComment(out *bytes.Buffer, data []byte, doRender bool) int {
+	i := p.inlineHTMLComment(out, data)
+	return p.renderHTMLBlock(out, data, i, doRender)
+}
+
+// HTML CDATA section
+func (p *parser) htmlCDATA(out *bytes.Buffer, data []byte, doRender bool) int {
+	const cdataTag = "<![cdata["
+	const cdataTagLen = len(cdataTag)
+	if len(data) < cdataTagLen+1 {
+		return 0
+	}
+	if !bytes.Equal(bytes.ToLower(data[:cdataTagLen]), []byte(cdataTag)) {
+		return 0
+	}
+	i := cdataTagLen
+	// scan for an end-of-comment marker, across lines if necessary
+	for i < len(data) && !(data[i-1] == ']' && data[i] == '>') {
+		i++
+	}
+	i++
+	// no end-of-comment marker
+	if i >= len(data) {
+		return 0
+	}
+	return p.renderHTMLBlock(out, data, i, doRender)
+}
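+
+// NOTE (illustrative comment, not part of the upstream source): htmlCDATA
+// matches, case-insensitively, a raw CDATA section at the start of data,
+// for example:
+//
+//	<![CDATA[ literal text, even <tags>, passes through unchanged ]]>
+//
+// renderHTMLBlock then accepts it as a block only if a blank line follows,
+// and hands the whole section to the renderer's BlockHtml.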
+
+// HR, which is the only self-closing block tag considered
+func (p *parser) htmlHr(out *bytes.Buffer, data []byte, doRender bool) int {
+	if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') {
+		return 0
+	}
+	if data[3] != ' ' && data[3] != '/' && data[3] != '>' {
+		// not an <hr> tag after all; at least not a valid one
+		return 0
+	}
+
+	i := 3
+	for data[i] != '>' && data[i] != '\n' {
+		i++
+	}
+
+	if data[i] == '>' {
+		return p.renderHTMLBlock(out, data, i+1, doRender)
+	}
+
+	return 0
+}
+
+func (p *parser) htmlFindTag(data []byte) (string, bool) {
+	i := 0
+	for isalnum(data[i]) {
+		i++
+	}
+	key := string(data[:i])
+	if _, ok := blockTags[key]; ok {
+		return key, true
+	}
+	return "", false
+}
+
+func (p *parser) htmlFindEnd(tag string, data []byte) int {
+	// assume data[0] == '<' && data[1] == '/' already tested
+
+	// check if tag is a match
+	closetag := []byte("</" + tag + ">")
+	if !bytes.HasPrefix(data, closetag) {
+		return 0
+	}
+	i := len(closetag)
+
+	// check that the rest of the line is blank
+	skip := 0
+	if skip = p.isEmpty(data[i:]); skip == 0 {
+		return 0
+	}
+	i += skip
+	skip = 0
+
+	if i >= len(data) {
+		return i
+	}
+
+	if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 {
+		return i
+	}
+	if skip = p.isEmpty(data[i:]); skip == 0 {
+		// following line must be blank
+		return 0
+	}
+
+	return i + skip
+}
+
+func (*parser) isEmpty(data []byte) int {
+	// it is okay to call isEmpty on an empty buffer
+	if len(data) == 0 {
+		return 0
+	}
+
+	var i int
+	for i = 0; i < len(data) && data[i] != '\n'; i++ {
+		if data[i] != ' ' && data[i] != '\t' {
+			return 0
+		}
+	}
+	return i + 1
+}
+
+func (*parser) isHRule(data []byte) bool {
+	i := 0
+
+	// skip up to three spaces
+	for i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	// look at the hrule char
+	if data[i] != '*' && data[i] != '-' && data[i] != '_' {
+		return false
+	}
+	c := data[i]
+
+	// the whole line must be the char or whitespace
+	n := 0
+	for data[i] != '\n' {
+		switch {
+		case data[i] == c:
+			n++
+		case data[i] != ' ':
+			return false
+		}
+		i++
+	}
+
+	return n >= 3
+}
+
+// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data,
+// and returns the end index if so, or 0 otherwise. It also returns the marker found.
+// If syntax is not nil, it gets set to the syntax specified in the fence line.
+// A final newline is mandatory to recognize the fence line, unless newlineOptional is true.
+func isFenceLine(data []byte, syntax *string, oldmarker string, newlineOptional bool) (end int, marker string) {
+	i, size := 0, 0
+
+	// skip up to three spaces
+	for i < len(data) && i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	// check for the marker characters: ~ or `
+	if i >= len(data) {
+		return 0, ""
+	}
+	if data[i] != '~' && data[i] != '`' {
+		return 0, ""
+	}
+
+	c := data[i]
+
+	// the whole line must be the same char or whitespace
+	for i < len(data) && data[i] == c {
+		size++
+		i++
+	}
+
+	// the marker char must occur at least 3 times
+	if size < 3 {
+		return 0, ""
+	}
+	marker = string(data[i-size : i])
+
+	// if this is the end marker, it must match the beginning marker
+	if oldmarker != "" && marker != oldmarker {
+		return 0, ""
+	}
+
+	// TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here
+	// into one, always get the syntax, and discard it if the caller doesn't care.
+ if syntax != nil { + syn := 0 + i = skipChar(data, i, ' ') + + if i >= len(data) { + if newlineOptional && i == len(data) { + return i, marker + } + return 0, "" + } + + syntaxStart := i + + if data[i] == '{' { + i++ + syntaxStart++ + + for i < len(data) && data[i] != '}' && data[i] != '\n' { + syn++ + i++ + } + + if i >= len(data) || data[i] != '}' { + return 0, "" + } + + // strip all whitespace at the beginning and the end + // of the {} block + for syn > 0 && isspace(data[syntaxStart]) { + syntaxStart++ + syn-- + } + + for syn > 0 && isspace(data[syntaxStart+syn-1]) { + syn-- + } + + i++ + } else { + for i < len(data) && !isspace(data[i]) { + syn++ + i++ + } + } + + *syntax = string(data[syntaxStart : syntaxStart+syn]) + } + + i = skipChar(data, i, ' ') + if i >= len(data) || data[i] != '\n' { + if newlineOptional && i == len(data) { + return i, marker + } + return 0, "" + } + + return i + 1, marker // Take newline into account. +} + +// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning, +// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects. +// If doRender is true, a final newline is mandatory to recognize the fenced code block. +func (p *parser) fencedCodeBlock(out *bytes.Buffer, data []byte, doRender bool) int { + var syntax string + beg, marker := isFenceLine(data, &syntax, "", false) + if beg == 0 || beg >= len(data) { + return 0 + } + + var work bytes.Buffer + + for { + // safe to assume beg < len(data) + + // check for the end of the code block + newlineOptional := !doRender + fenceEnd, _ := isFenceLine(data[beg:], nil, marker, newlineOptional) + if fenceEnd != 0 { + beg += fenceEnd + break + } + + // copy the current line + end := skipUntilChar(data, beg, '\n') + 1 + + // did we reach the end of the buffer without a closing marker? 
+ if end >= len(data) { + return 0 + } + + // verbatim copy to the working buffer + if doRender { + work.Write(data[beg:end]) + } + beg = end + } + + if doRender { + p.r.BlockCode(out, work.Bytes(), syntax) + } + + return beg +} + +func (p *parser) table(out *bytes.Buffer, data []byte) int { + var header bytes.Buffer + i, columns := p.tableHeader(&header, data) + if i == 0 { + return 0 + } + + var body bytes.Buffer + + for i < len(data) { + pipes, rowStart := 0, i + for ; data[i] != '\n'; i++ { + if data[i] == '|' { + pipes++ + } + } + + if pipes == 0 { + i = rowStart + break + } + + // include the newline in data sent to tableRow + i++ + p.tableRow(&body, data[rowStart:i], columns, false) + } + + p.r.Table(out, header.Bytes(), body.Bytes(), columns) + + return i +} + +// check if the specified position is preceded by an odd number of backslashes +func isBackslashEscaped(data []byte, i int) bool { + backslashes := 0 + for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { + backslashes++ + } + return backslashes&1 == 1 +} + +func (p *parser) tableHeader(out *bytes.Buffer, data []byte) (size int, columns []int) { + i := 0 + colCount := 1 + for i = 0; data[i] != '\n'; i++ { + if data[i] == '|' && !isBackslashEscaped(data, i) { + colCount++ + } + } + + // doesn't look like a table header + if colCount == 1 { + return + } + + // include the newline in the data sent to tableRow + header := data[:i+1] + + // column count ignores pipes at beginning or end of line + if data[0] == '|' { + colCount-- + } + if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) { + colCount-- + } + + columns = make([]int, colCount) + + // move on to the header underline + i++ + if i >= len(data) { + return + } + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + i = skipChar(data, i, ' ') + + // each column header is of form: / *:?-+:? 
*|/ with # dashes + # colons >= 3 + // and trailing | optional on last column + col := 0 + for data[i] != '\n' { + dashes := 0 + + if data[i] == ':' { + i++ + columns[col] |= TABLE_ALIGNMENT_LEFT + dashes++ + } + for data[i] == '-' { + i++ + dashes++ + } + if data[i] == ':' { + i++ + columns[col] |= TABLE_ALIGNMENT_RIGHT + dashes++ + } + for data[i] == ' ' { + i++ + } + + // end of column test is messy + switch { + case dashes < 3: + // not a valid column + return + + case data[i] == '|' && !isBackslashEscaped(data, i): + // marker found, now skip past trailing whitespace + col++ + i++ + for data[i] == ' ' { + i++ + } + + // trailing junk found after last column + if col >= colCount && data[i] != '\n' { + return + } + + case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: + // something else found where marker was required + return + + case data[i] == '\n': + // marker is optional for the last column + col++ + + default: + // trailing junk found after last column + return + } + } + if col != colCount { + return + } + + p.tableRow(out, header, columns, true) + size = i + 1 + return +} + +func (p *parser) tableRow(out *bytes.Buffer, data []byte, columns []int, header bool) { + i, col := 0, 0 + var rowWork bytes.Buffer + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + + for col = 0; col < len(columns) && i < len(data); col++ { + for data[i] == ' ' { + i++ + } + + cellStart := i + + for (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { + i++ + } + + cellEnd := i + + // skip the end-of-cell marker, possibly taking us past end of buffer + i++ + + for cellEnd > cellStart && data[cellEnd-1] == ' ' { + cellEnd-- + } + + var cellWork bytes.Buffer + p.inline(&cellWork, data[cellStart:cellEnd]) + + if header { + p.r.TableHeaderCell(&rowWork, cellWork.Bytes(), columns[col]) + } else { + p.r.TableCell(&rowWork, cellWork.Bytes(), columns[col]) + } + } + + // pad it out with empty columns to get the right number + for ; col < len(columns); col++ { + if header { + p.r.TableHeaderCell(&rowWork, nil, columns[col]) + } else { + p.r.TableCell(&rowWork, nil, columns[col]) + } + } + + // silently ignore rows with too many cells + + p.r.TableRow(out, rowWork.Bytes()) +} + +// returns blockquote prefix length +func (p *parser) quotePrefix(data []byte) int { + i := 0 + for i < 3 && data[i] == ' ' { + i++ + } + if data[i] == '>' { + if data[i+1] == ' ' { + return i + 2 + } + return i + 1 + } + return 0 +} + +// blockquote ends with at least one blank line +// followed by something without a blockquote prefix +func (p *parser) terminateBlockquote(data []byte, beg, end int) bool { + if p.isEmpty(data[beg:]) <= 0 { + return false + } + if end >= len(data) { + return true + } + return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0 +} + +// parse a blockquote fragment +func (p *parser) quote(out *bytes.Buffer, data []byte) int { + var raw bytes.Buffer + beg, end := 0, 0 + for beg < len(data) { + end = beg + // Step over whole lines, collecting them. 
While doing that, check for + // fenced code and if one's found, incorporate it altogether, + // irregardless of any contents inside it + for data[end] != '\n' { + if p.flags&EXTENSION_FENCED_CODE != 0 { + if i := p.fencedCodeBlock(out, data[end:], false); i > 0 { + // -1 to compensate for the extra end++ after the loop: + end += i - 1 + break + } + } + end++ + } + end++ + + if pre := p.quotePrefix(data[beg:]); pre > 0 { + // skip the prefix + beg += pre + } else if p.terminateBlockquote(data, beg, end) { + break + } + + // this line is part of the blockquote + raw.Write(data[beg:end]) + beg = end + } + + var cooked bytes.Buffer + p.block(&cooked, raw.Bytes()) + p.r.BlockQuote(out, cooked.Bytes()) + return end +} + +// returns prefix length for block code +func (p *parser) codePrefix(data []byte) int { + if data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' { + return 4 + } + return 0 +} + +func (p *parser) code(out *bytes.Buffer, data []byte) int { + var work bytes.Buffer + + i := 0 + for i < len(data) { + beg := i + for data[i] != '\n' { + i++ + } + i++ + + blankline := p.isEmpty(data[beg:i]) > 0 + if pre := p.codePrefix(data[beg:i]); pre > 0 { + beg += pre + } else if !blankline { + // non-empty, non-prefixed line breaks the pre + i = beg + break + } + + // verbatim copy to the working buffeu + if blankline { + work.WriteByte('\n') + } else { + work.Write(data[beg:i]) + } + } + + // trim all the \n off the end of work + workbytes := work.Bytes() + eol := len(workbytes) + for eol > 0 && workbytes[eol-1] == '\n' { + eol-- + } + if eol != len(workbytes) { + work.Truncate(eol) + } + + work.WriteByte('\n') + + p.r.BlockCode(out, work.Bytes(), "") + + return i +} + +// returns unordered list item prefix +func (p *parser) uliPrefix(data []byte) int { + i := 0 + + // start with up to 3 spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // need a *, +, or - followed by a space + if (data[i] != '*' && data[i] != '+' && data[i] != '-') || + data[i+1] != ' ' { + return 0 + } + return i + 2 +} + +// returns ordered list item prefix +func (p *parser) oliPrefix(data []byte) int { + i := 0 + + // start with up to 3 spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // count the digits + start := i + for data[i] >= '0' && data[i] <= '9' { + i++ + } + + // we need >= 1 digits followed by a dot and a space + if start == i || data[i] != '.' || data[i+1] != ' ' { + return 0 + } + return i + 2 +} + +// returns definition list item prefix +func (p *parser) dliPrefix(data []byte) int { + i := 0 + + // need a : followed by a spaces + if data[i] != ':' || data[i+1] != ' ' { + return 0 + } + for data[i] == ' ' { + i++ + } + return i + 2 +} + +// parse ordered or unordered list block +func (p *parser) list(out *bytes.Buffer, data []byte, flags int) int { + i := 0 + flags |= LIST_ITEM_BEGINNING_OF_LIST + work := func() bool { + for i < len(data) { + skip := p.listItem(out, data[i:], &flags) + i += skip + + if skip == 0 || flags&LIST_ITEM_END_OF_LIST != 0 { + break + } + flags &= ^LIST_ITEM_BEGINNING_OF_LIST + } + return true + } + + p.r.List(out, work, flags) + return i +} + +// Parse a single list item. +// Assumes initial prefix is already removed if this is a sublist. 
+func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int { + // keep track of the indentation of the first line + itemIndent := 0 + for itemIndent < 3 && data[itemIndent] == ' ' { + itemIndent++ + } + + i := p.uliPrefix(data) + if i == 0 { + i = p.oliPrefix(data) + } + if i == 0 { + i = p.dliPrefix(data) + // reset definition term flag + if i > 0 { + *flags &= ^LIST_TYPE_TERM + } + } + if i == 0 { + // if in defnition list, set term flag and continue + if *flags&LIST_TYPE_DEFINITION != 0 { + *flags |= LIST_TYPE_TERM + } else { + return 0 + } + } + + // skip leading whitespace on first line + for data[i] == ' ' { + i++ + } + + // find the end of the line + line := i + for i > 0 && data[i-1] != '\n' { + i++ + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[line:i]) + line = i + + // process the following lines + containsBlankLine := false + sublist := 0 + +gatherlines: + for line < len(data) { + i++ + + // find the end of this line + for data[i-1] != '\n' { + i++ + } + + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[line:i]) > 0 { + containsBlankLine = true + raw.Write(data[line:i]) + line = i + continue + } + + // calculate the indentation + indent := 0 + for indent < 4 && line+indent < i && data[line+indent] == ' ' { + indent++ + } + + chunk := data[line+indent : i] + + // evaluate how this line fits in + switch { + // is this a nested list item? + case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || + p.oliPrefix(chunk) > 0 || + p.dliPrefix(chunk) > 0: + + if containsBlankLine { + // end the list if the type changed after a blank line + if indent <= itemIndent && + ((*flags&LIST_TYPE_ORDERED != 0 && p.uliPrefix(chunk) > 0) || + (*flags&LIST_TYPE_ORDERED == 0 && p.oliPrefix(chunk) > 0)) { + + *flags |= LIST_ITEM_END_OF_LIST + break gatherlines + } + *flags |= LIST_ITEM_CONTAINS_BLOCK + } + + // to be a nested list, it must be indented more + // if not, it is the next item in the same list + if indent <= itemIndent { + break gatherlines + } + + // is this the first item in the nested list? + if sublist == 0 { + sublist = raw.Len() + } + + // is this a nested prefix header? + case p.isPrefixHeader(chunk): + // if the header is not indented, it is not nested in the list + // and thus ends the list + if containsBlankLine && indent < 4 { + *flags |= LIST_ITEM_END_OF_LIST + break gatherlines + } + *flags |= LIST_ITEM_CONTAINS_BLOCK + + // anything following an empty line is only part + // of this item if it is indented 4 spaces + // (regardless of the indentation of the beginning of the item) + case containsBlankLine && indent < 4: + if *flags&LIST_TYPE_DEFINITION != 0 && i < len(data)-1 { + // is the next item still a part of this list? + next := i + for data[next] != '\n' { + next++ + } + for next < len(data)-1 && data[next] == '\n' { + next++ + } + if i < len(data)-1 && data[i] != ':' && data[next] != ':' { + *flags |= LIST_ITEM_END_OF_LIST + } + } else { + *flags |= LIST_ITEM_END_OF_LIST + } + break gatherlines + + // a blank line means this should be parsed as a block + case containsBlankLine: + *flags |= LIST_ITEM_CONTAINS_BLOCK + } + + containsBlankLine = false + + // add the line into the working buffer without prefix + raw.Write(data[line+indent : i]) + + line = i + } + + // If reached end of data, the Renderer.ListItem call we're going to make below + // is definitely the last in the list. 
+ if line >= len(data) { + *flags |= LIST_ITEM_END_OF_LIST + } + + rawBytes := raw.Bytes() + + // render the contents of the list item + var cooked bytes.Buffer + if *flags&LIST_ITEM_CONTAINS_BLOCK != 0 && *flags&LIST_TYPE_TERM == 0 { + // intermediate render of block item, except for definition term + if sublist > 0 { + p.block(&cooked, rawBytes[:sublist]) + p.block(&cooked, rawBytes[sublist:]) + } else { + p.block(&cooked, rawBytes) + } + } else { + // intermediate render of inline item + if sublist > 0 { + p.inline(&cooked, rawBytes[:sublist]) + p.block(&cooked, rawBytes[sublist:]) + } else { + p.inline(&cooked, rawBytes) + } + } + + // render the actual list item + cookedBytes := cooked.Bytes() + parsedEnd := len(cookedBytes) + + // strip trailing newlines + for parsedEnd > 0 && cookedBytes[parsedEnd-1] == '\n' { + parsedEnd-- + } + p.r.ListItem(out, cookedBytes[:parsedEnd], *flags) + + return line +} + +// render a single paragraph that has already been parsed out +func (p *parser) renderParagraph(out *bytes.Buffer, data []byte) { + if len(data) == 0 { + return + } + + // trim leading spaces + beg := 0 + for data[beg] == ' ' { + beg++ + } + + // trim trailing newline + end := len(data) - 1 + + // trim trailing spaces + for end > beg && data[end-1] == ' ' { + end-- + } + + work := func() bool { + p.inline(out, data[beg:end]) + return true + } + p.r.Paragraph(out, work) +} + +func (p *parser) paragraph(out *bytes.Buffer, data []byte) int { + // prev: index of 1st char of previous line + // line: index of 1st char of current line + // i: index of cursor/end of current line + var prev, line, i int + + // keep going until we find something to mark the end of the paragraph + for i < len(data) { + // mark the beginning of the current line + prev = line + current := data[i:] + line = i + + // did we find a blank line marking the end of the paragraph? + if n := p.isEmpty(current); n > 0 { + // did this blank line followed by a definition list item? 
+ if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if i < len(data)-1 && data[i+1] == ':' { + return p.list(out, data[prev:], LIST_TYPE_DEFINITION) + } + } + + p.renderParagraph(out, data[:i]) + return i + n + } + + // an underline under some text marks a header, so our paragraph ended on prev line + if i > 0 { + if level := p.isUnderlinedHeader(current); level > 0 { + // render the paragraph + p.renderParagraph(out, data[:prev]) + + // ignore leading and trailing whitespace + eol := i - 1 + for prev < eol && data[prev] == ' ' { + prev++ + } + for eol > prev && data[eol-1] == ' ' { + eol-- + } + + // render the header + // this ugly double closure avoids forcing variables onto the heap + work := func(o *bytes.Buffer, pp *parser, d []byte) func() bool { + return func() bool { + pp.inline(o, d) + return true + } + }(out, p, data[prev:eol]) + + id := "" + if p.flags&EXTENSION_AUTO_HEADER_IDS != 0 { + id = SanitizedAnchorName(string(data[prev:eol])) + } + + p.r.Header(out, work, level, id) + + // find the end of the underline + for data[i] != '\n' { + i++ + } + return i + } + } + + // if the next line starts a block of HTML, then the paragraph ends here + if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 { + if data[i] == '<' && p.html(out, current, false) > 0 { + // rewind to before the HTML block + p.renderParagraph(out, data[:i]) + return i + } + } + + // if there's a prefixed header or a horizontal rule after this, paragraph is over + if p.isPrefixHeader(current) || p.isHRule(current) { + p.renderParagraph(out, data[:i]) + return i + } + + // if there's a fenced code block, paragraph is over + if p.flags&EXTENSION_FENCED_CODE != 0 { + if p.fencedCodeBlock(out, current, false) > 0 { + p.renderParagraph(out, data[:i]) + return i + } + } + + // if there's a definition list item, prev line is a definition term + if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if p.dliPrefix(current) != 0 { + return p.list(out, data[prev:], LIST_TYPE_DEFINITION) + } + } + + // if there's a list after this, paragraph is over + if p.flags&EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK != 0 { + if p.uliPrefix(current) != 0 || + p.oliPrefix(current) != 0 || + p.quotePrefix(current) != 0 || + p.codePrefix(current) != 0 { + p.renderParagraph(out, data[:i]) + return i + } + } + + // otherwise, scan to the beginning of the next line + for data[i] != '\n' { + i++ + } + i++ + } + + p.renderParagraph(out, data[:i]) + return i +} + +// SanitizedAnchorName returns a sanitized anchor name for the given text. +// +// It implements the algorithm specified in the package comment. +func SanitizedAnchorName(text string) string { + var anchorName []rune + futureDash := false + for _, r := range text { + switch { + case unicode.IsLetter(r) || unicode.IsNumber(r): + if futureDash && len(anchorName) > 0 { + anchorName = append(anchorName, '-') + } + futureDash = false + anchorName = append(anchorName, unicode.ToLower(r)) + default: + futureDash = true + } + } + return string(anchorName) +} diff --git a/vendor/github.com/sqlitebrowser/blackfriday/doc.go b/vendor/github.com/sqlitebrowser/blackfriday/doc.go new file mode 100644 index 0000000..9656c42 --- /dev/null +++ b/vendor/github.com/sqlitebrowser/blackfriday/doc.go @@ -0,0 +1,32 @@ +// Package blackfriday is a Markdown processor. +// +// It translates plain text with simple formatting rules into HTML or LaTeX. +// +// Sanitized Anchor Names +// +// Blackfriday includes an algorithm for creating sanitized anchor names +// corresponding to a given input text. 
This algorithm is used to create
+// anchors for headings when EXTENSION_AUTO_HEADER_IDS is enabled. The
+// algorithm is specified below, so that other packages can create
+// compatible anchor names and links to those anchors.
+//
+// The algorithm iterates over the input text, interpreted as UTF-8,
+// one Unicode code point (rune) at a time. All runes that are letters (category L)
+// or numbers (category N) are considered valid characters. They are mapped to
+// lower case, and included in the output. All other runes are considered
+// invalid characters. Invalid characters that precede the first valid character,
+// as well as invalid characters that follow the last valid character,
+// are dropped completely. All other sequences of invalid characters
+// between two valid characters are replaced with a single dash character '-'.
+//
+// SanitizedAnchorName exposes this functionality, and can be used to
+// create compatible links to the anchor names generated by blackfriday.
+// This algorithm is also implemented in a small standalone package at
+// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients
+// that want a small package and don't need full functionality of blackfriday.
+package blackfriday
+
+// NOTE: Keep Sanitized Anchor Name algorithm in sync with package
+// github.com/shurcooL/sanitized_anchor_name.
+// Otherwise, users of sanitized_anchor_name will get anchor names
+// that are incompatible with those generated by blackfriday.
diff --git a/vendor/github.com/sqlitebrowser/blackfriday/html.go b/vendor/github.com/sqlitebrowser/blackfriday/html.go
new file mode 100644
index 0000000..bb9268f
--- /dev/null
+++ b/vendor/github.com/sqlitebrowser/blackfriday/html.go
@@ -0,0 +1,950 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+//
+// HTML rendering backend
+//
+//
+
+package blackfriday
+
+import (
+	"bytes"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// Html renderer configuration options.
+const (
+	HTML_SKIP_HTML  = 1 << iota // skip preformatted HTML blocks
+	HTML_SKIP_STYLE             // skip embedded <style>