+
+{{ end }}
+```
+
+#### Custom Template Renderer
+
+You can also use your own HTML template renderer:
+
+```go
+import "html/template"
+
+func main() {
+ router := gin.Default()
+ html := template.Must(template.ParseFiles("file1", "file2"))
+ router.SetHTMLTemplate(html)
+ router.Run(":8080")
+}
+```
+
+#### Custom Delimiters
+
+You may use custom delimiters:
+
+```go
+ r := gin.Default()
+ r.Delims("{[{", "}]}")
+ r.LoadHTMLGlob("/path/to/templates")
+```
+
+#### Custom Template Funcs
+
+See the detailed [example code](https://github.com/gin-gonic/examples/tree/master/template).
+
+main.go
+
+```go
+import (
+ "fmt"
+ "html/template"
+ "net/http"
+ "time"
+
+ "github.com/gin-gonic/gin"
+)
+
+func formatAsDate(t time.Time) string {
+ year, month, day := t.Date()
+	return fmt.Sprintf("%d/%02d/%02d", year, month, day)
+}
+
+func main() {
+ router := gin.Default()
+ router.Delims("{[{", "}]}")
+ router.SetFuncMap(template.FuncMap{
+ "formatAsDate": formatAsDate,
+ })
+ router.LoadHTMLFiles("./testdata/template/raw.tmpl")
+
+ router.GET("/raw", func(c *gin.Context) {
+ c.HTML(http.StatusOK, "raw.tmpl", gin.H{
+ "now": time.Date(2017, 07, 01, 0, 0, 0, 0, time.UTC),
+ })
+ })
+
+ router.Run(":8080")
+}
+```
+
+raw.tmpl
+
+```html
+Date: {[{.now | formatAsDate}]}
+```
+
+Result:
+```
+Date: 2017/07/01
+```
+
+### Multitemplate
+
+Gin allows using only one html.Template by default. Check [the multitemplate render](https://github.com/gin-contrib/multitemplate) to use features like Go 1.6 `block template`.
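+
+As a minimal sketch (the `templates/` file names here are illustrative), you register each composition of templates under a name and render it by that name:
+
+```go
+import (
+	"net/http"
+
+	"github.com/gin-contrib/multitemplate"
+	"github.com/gin-gonic/gin"
+)
+
+func createRender() multitemplate.Renderer {
+	// Each named entry composes a shared base layout with a page template.
+	r := multitemplate.NewRenderer()
+	r.AddFromFiles("index", "templates/base.html", "templates/index.html")
+	r.AddFromFiles("article", "templates/base.html", "templates/article.html")
+	return r
+}
+
+func main() {
+	router := gin.Default()
+	router.HTMLRender = createRender()
+	router.GET("/", func(c *gin.Context) {
+		c.HTML(http.StatusOK, "index", gin.H{"title": "Index"})
+	})
+	router.Run(":8080")
+}
+```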
+
+### Redirects
+
+Issuing an HTTP redirect is easy. Both internal and external locations are supported.
+
+```go
+r.GET("/test", func(c *gin.Context) {
+ c.Redirect(http.StatusMovedPermanently, "http://www.google.com/")
+})
+```
+
+Issuing an HTTP redirect from a POST handler. Refer to issue [#444](https://github.com/gin-gonic/gin/issues/444):
+```go
+r.POST("/test", func(c *gin.Context) {
+ c.Redirect(http.StatusFound, "/foo")
+})
+```
+
+To issue a router redirect, use `HandleContext` as shown below.
+
+```go
+r.GET("/test", func(c *gin.Context) {
+ c.Request.URL.Path = "/test2"
+ r.HandleContext(c)
+})
+r.GET("/test2", func(c *gin.Context) {
+ c.JSON(200, gin.H{"hello": "world"})
+})
+```
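+
+A hypothetical run (assuming the server listens on port 8080): a request to `/test` is re-routed internally and answered by the `/test2` handler.
+
+```
+$ curl http://localhost:8080/test
+{"hello":"world"}
+```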
+
+
+### Custom Middleware
+
+```go
+func Logger() gin.HandlerFunc {
+ return func(c *gin.Context) {
+ t := time.Now()
+
+ // Set example variable
+ c.Set("example", "12345")
+
+ // before request
+
+ c.Next()
+
+ // after request
+ latency := time.Since(t)
+ log.Print(latency)
+
+ // access the status we are sending
+ status := c.Writer.Status()
+ log.Println(status)
+ }
+}
+
+func main() {
+ r := gin.New()
+ r.Use(Logger())
+
+ r.GET("/test", func(c *gin.Context) {
+ example := c.MustGet("example").(string)
+
+ // it would print: "12345"
+ log.Println(example)
+ })
+
+ // Listen and serve on 0.0.0.0:8080
+ r.Run(":8080")
+}
+```
+
+### Using BasicAuth() middleware
+
+```go
+// simulate some private data
+var secrets = gin.H{
+ "foo": gin.H{"email": "foo@bar.com", "phone": "123433"},
+ "austin": gin.H{"email": "austin@example.com", "phone": "666"},
+ "lena": gin.H{"email": "lena@guapa.com", "phone": "523443"},
+}
+
+func main() {
+ r := gin.Default()
+
+ // Group using gin.BasicAuth() middleware
+ // gin.Accounts is a shortcut for map[string]string
+ authorized := r.Group("/admin", gin.BasicAuth(gin.Accounts{
+ "foo": "bar",
+ "austin": "1234",
+ "lena": "hello2",
+ "manu": "4321",
+ }))
+
+ // /admin/secrets endpoint
+ // hit "localhost:8080/admin/secrets
+ authorized.GET("/secrets", func(c *gin.Context) {
+ // get user, it was set by the BasicAuth middleware
+ user := c.MustGet(gin.AuthUserKey).(string)
+ if secret, ok := secrets[user]; ok {
+ c.JSON(http.StatusOK, gin.H{"user": user, "secret": secret})
+ } else {
+ c.JSON(http.StatusOK, gin.H{"user": user, "secret": "NO SECRET :("})
+ }
+ })
+
+ // Listen and serve on 0.0.0.0:8080
+ r.Run(":8080")
+}
+```
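+
+A hypothetical check with `curl` (the output key order follows Gin's JSON rendering of `gin.H`):
+
+```
+$ curl -u foo:bar "http://localhost:8080/admin/secrets"
+{"secret":{"email":"foo@bar.com","phone":"123433"},"user":"foo"}
+```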
+
+### Goroutines inside a middleware
+
+When starting new goroutines inside a middleware or handler, you **SHOULD NOT** use the original context inside them; you have to use a read-only copy.
+
+```go
+func main() {
+ r := gin.Default()
+
+ r.GET("/long_async", func(c *gin.Context) {
+ // create copy to be used inside the goroutine
+ cCp := c.Copy()
+ go func() {
+ // simulate a long task with time.Sleep(). 5 seconds
+ time.Sleep(5 * time.Second)
+
+ // note that you are using the copied context "cCp", IMPORTANT
+ log.Println("Done! in path " + cCp.Request.URL.Path)
+ }()
+ })
+
+ r.GET("/long_sync", func(c *gin.Context) {
+ // simulate a long task with time.Sleep(). 5 seconds
+ time.Sleep(5 * time.Second)
+
+ // since we are NOT using a goroutine, we do not have to copy the context
+ log.Println("Done! in path " + c.Request.URL.Path)
+ })
+
+ // Listen and serve on 0.0.0.0:8080
+ r.Run(":8080")
+}
+```
+
+### Custom HTTP configuration
+
+Use `http.ListenAndServe()` directly, like this:
+
+```go
+func main() {
+ router := gin.Default()
+ http.ListenAndServe(":8080", router)
+}
+```
+or
+
+```go
+func main() {
+ router := gin.Default()
+
+ s := &http.Server{
+ Addr: ":8080",
+ Handler: router,
+ ReadTimeout: 10 * time.Second,
+ WriteTimeout: 10 * time.Second,
+ MaxHeaderBytes: 1 << 20,
+ }
+ s.ListenAndServe()
+}
+```
+
+### Support Let's Encrypt
+
+Example of a one-line LetsEncrypt HTTPS server:
+
+```go
+package main
+
+import (
+ "log"
+
+ "github.com/gin-gonic/autotls"
+ "github.com/gin-gonic/gin"
+)
+
+func main() {
+ r := gin.Default()
+
+ // Ping handler
+ r.GET("/ping", func(c *gin.Context) {
+ c.String(200, "pong")
+ })
+
+ log.Fatal(autotls.Run(r, "example1.com", "example2.com"))
+}
+```
+
+Example of a custom autocert manager:
+
+```go
+package main
+
+import (
+ "log"
+
+ "github.com/gin-gonic/autotls"
+ "github.com/gin-gonic/gin"
+ "golang.org/x/crypto/acme/autocert"
+)
+
+func main() {
+ r := gin.Default()
+
+ // Ping handler
+ r.GET("/ping", func(c *gin.Context) {
+ c.String(200, "pong")
+ })
+
+ m := autocert.Manager{
+ Prompt: autocert.AcceptTOS,
+ HostPolicy: autocert.HostWhitelist("example1.com", "example2.com"),
+ Cache: autocert.DirCache("/var/www/.cache"),
+ }
+
+ log.Fatal(autotls.RunWithManager(r, &m))
+}
+```
+
+### Run multiple services using Gin
+
+See the [question](https://github.com/gin-gonic/gin/issues/346) and try the following example:
+
+```go
+package main
+
+import (
+ "log"
+ "net/http"
+ "time"
+
+ "github.com/gin-gonic/gin"
+ "golang.org/x/sync/errgroup"
+)
+
+var (
+ g errgroup.Group
+)
+
+func router01() http.Handler {
+ e := gin.New()
+ e.Use(gin.Recovery())
+ e.GET("/", func(c *gin.Context) {
+ c.JSON(
+ http.StatusOK,
+ gin.H{
+ "code": http.StatusOK,
+ "error": "Welcome server 01",
+ },
+ )
+ })
+
+ return e
+}
+
+func router02() http.Handler {
+ e := gin.New()
+ e.Use(gin.Recovery())
+ e.GET("/", func(c *gin.Context) {
+ c.JSON(
+ http.StatusOK,
+ gin.H{
+ "code": http.StatusOK,
+ "error": "Welcome server 02",
+ },
+ )
+ })
+
+ return e
+}
+
+func main() {
+ server01 := &http.Server{
+ Addr: ":8080",
+ Handler: router01(),
+ ReadTimeout: 5 * time.Second,
+ WriteTimeout: 10 * time.Second,
+ }
+
+ server02 := &http.Server{
+ Addr: ":8081",
+ Handler: router02(),
+ ReadTimeout: 5 * time.Second,
+ WriteTimeout: 10 * time.Second,
+ }
+
+ g.Go(func() error {
+ err := server01.ListenAndServe()
+ if err != nil && err != http.ErrServerClosed {
+ log.Fatal(err)
+ }
+ return err
+ })
+
+ g.Go(func() error {
+ err := server02.ListenAndServe()
+ if err != nil && err != http.ErrServerClosed {
+ log.Fatal(err)
+ }
+ return err
+ })
+
+ if err := g.Wait(); err != nil {
+ log.Fatal(err)
+ }
+}
+```
+
+### Graceful shutdown or restart
+
+There are a few approaches you can use to perform a graceful shutdown or restart. You can make use of third-party packages specifically built for that, or you can manually do the same with the functions and methods from the built-in packages.
+
+#### Third-party packages
+
+We can use [fvbock/endless](https://github.com/fvbock/endless) to replace the default `ListenAndServe`. Refer to issue [#296](https://github.com/gin-gonic/gin/issues/296) for more details.
+
+```go
+router := gin.Default()
+router.GET("/", handler)
+// [...]
+endless.ListenAndServe(":4242", router)
+```
+
+Alternatives:
+
+* [manners](https://github.com/braintree/manners): A polite Go HTTP server that shuts down gracefully.
+* [graceful](https://github.com/tylerb/graceful): Graceful is a Go package enabling graceful shutdown of an http.Handler server.
+* [grace](https://github.com/facebookgo/grace): Graceful restart & zero downtime deploy for Go servers.
+
+#### Manually
+
+In case you are using Go 1.8 or a later version, you may not need to use those libraries. Consider using `http.Server`'s built-in [Shutdown()](https://golang.org/pkg/net/http/#Server.Shutdown) method for graceful shutdowns. The example below describes its usage, and we've got more examples using Gin [here](https://github.com/gin-gonic/examples/tree/master/graceful-shutdown).
+
+```go
+// +build go1.8
+
+package main
+
+import (
+ "context"
+ "log"
+ "net/http"
+ "os"
+ "os/signal"
+ "syscall"
+ "time"
+
+ "github.com/gin-gonic/gin"
+)
+
+func main() {
+ router := gin.Default()
+ router.GET("/", func(c *gin.Context) {
+ time.Sleep(5 * time.Second)
+ c.String(http.StatusOK, "Welcome Gin Server")
+ })
+
+ srv := &http.Server{
+ Addr: ":8080",
+ Handler: router,
+ }
+
+ // Initializing the server in a goroutine so that
+ // it won't block the graceful shutdown handling below
+ go func() {
+		if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
+ log.Printf("listen: %s\n", err)
+ }
+ }()
+
+	// Wait for an interrupt signal to gracefully shut down the server
+	// with a timeout of 5 seconds.
+	quit := make(chan os.Signal, 1)
+	// kill (no param) sends syscall.SIGTERM by default
+	// kill -2 sends syscall.SIGINT
+	// kill -9 sends syscall.SIGKILL, but it can't be caught, so there is no need to add it
+	signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
+ <-quit
+ log.Println("Shutting down server...")
+
+ // The context is used to inform the server it has 5 seconds to finish
+ // the request it is currently handling
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ if err := srv.Shutdown(ctx); err != nil {
+ log.Fatal("Server forced to shutdown:", err)
+ }
+
+ log.Println("Server exiting")
+}
+```
+
+### Build a single binary with templates
+
+You can build a server into a single binary containing templates by using [go-assets][].
+
+[go-assets]: https://github.com/jessevdk/go-assets
+
+```go
+func main() {
+ r := gin.New()
+
+ t, err := loadTemplate()
+ if err != nil {
+ panic(err)
+ }
+ r.SetHTMLTemplate(t)
+
+ r.GET("/", func(c *gin.Context) {
+		c.HTML(http.StatusOK, "/html/index.tmpl", nil)
+ })
+ r.Run(":8080")
+}
+
+// loadTemplate loads templates embedded by go-assets-builder
+func loadTemplate() (*template.Template, error) {
+ t := template.New("")
+ for name, file := range Assets.Files {
+ defer file.Close()
+ if file.IsDir() || !strings.HasSuffix(name, ".tmpl") {
+ continue
+ }
+ h, err := ioutil.ReadAll(file)
+ if err != nil {
+ return nil, err
+ }
+ t, err = t.New(name).Parse(string(h))
+ if err != nil {
+ return nil, err
+ }
+ }
+ return t, nil
+}
+```
+
+See a complete example in the [assets-in-binary](https://github.com/gin-gonic/examples/tree/master/assets-in-binary) directory.
+
+### Bind form-data request with custom struct
+
+The following example uses custom structs:
+
+```go
+type StructA struct {
+ FieldA string `form:"field_a"`
+}
+
+type StructB struct {
+ NestedStruct StructA
+ FieldB string `form:"field_b"`
+}
+
+type StructC struct {
+ NestedStructPointer *StructA
+ FieldC string `form:"field_c"`
+}
+
+type StructD struct {
+ NestedAnonyStruct struct {
+ FieldX string `form:"field_x"`
+ }
+ FieldD string `form:"field_d"`
+}
+
+func GetDataB(c *gin.Context) {
+ var b StructB
+ c.Bind(&b)
+ c.JSON(200, gin.H{
+ "a": b.NestedStruct,
+ "b": b.FieldB,
+ })
+}
+
+func GetDataC(c *gin.Context) {
+ var b StructC
+ c.Bind(&b)
+ c.JSON(200, gin.H{
+ "a": b.NestedStructPointer,
+ "c": b.FieldC,
+ })
+}
+
+func GetDataD(c *gin.Context) {
+ var b StructD
+ c.Bind(&b)
+ c.JSON(200, gin.H{
+ "x": b.NestedAnonyStruct,
+ "d": b.FieldD,
+ })
+}
+
+func main() {
+ r := gin.Default()
+ r.GET("/getb", GetDataB)
+ r.GET("/getc", GetDataC)
+ r.GET("/getd", GetDataD)
+
+ r.Run()
+}
+```
+
+Running it with the `curl` command gives the following results:
+
+```
+$ curl "http://localhost:8080/getb?field_a=hello&field_b=world"
+{"a":{"FieldA":"hello"},"b":"world"}
+$ curl "http://localhost:8080/getc?field_a=hello&field_c=world"
+{"a":{"FieldA":"hello"},"c":"world"}
+$ curl "http://localhost:8080/getd?field_x=hello&field_d=world"
+{"d":"world","x":{"FieldX":"hello"}}
+```
+
+### Try to bind body into different structs
+
+The normal methods for binding a request body consume `c.Request.Body`, so they
+cannot be called multiple times.
+
+```go
+type formA struct {
+ Foo string `json:"foo" xml:"foo" binding:"required"`
+}
+
+type formB struct {
+ Bar string `json:"bar" xml:"bar" binding:"required"`
+}
+
+func SomeHandler(c *gin.Context) {
+ objA := formA{}
+ objB := formB{}
+ // This c.ShouldBind consumes c.Request.Body and it cannot be reused.
+ if errA := c.ShouldBind(&objA); errA == nil {
+ c.String(http.StatusOK, `the body should be formA`)
+	// This always fails because c.Request.Body is EOF now.
+ } else if errB := c.ShouldBind(&objB); errB == nil {
+ c.String(http.StatusOK, `the body should be formB`)
+ } else {
+ ...
+ }
+}
+```
+
+For this, you can use `c.ShouldBindBodyWith`.
+
+```go
+func SomeHandler(c *gin.Context) {
+ objA := formA{}
+ objB := formB{}
+ // This reads c.Request.Body and stores the result into the context.
+ if errA := c.ShouldBindBodyWith(&objA, binding.JSON); errA == nil {
+ c.String(http.StatusOK, `the body should be formA`)
+	// This time, it reuses the body stored in the context.
+ } else if errB := c.ShouldBindBodyWith(&objB, binding.JSON); errB == nil {
+ c.String(http.StatusOK, `the body should be formB JSON`)
+	// And it can accept other formats
+ } else if errB2 := c.ShouldBindBodyWith(&objB, binding.XML); errB2 == nil {
+ c.String(http.StatusOK, `the body should be formB XML`)
+ } else {
+ ...
+ }
+}
+```
+
+* `c.ShouldBindBodyWith` stores the body in the context before binding. This has
+a slight impact on performance, so you should not use this method if calling
+the binding once is sufficient.
+* This feature is only needed for some formats -- `JSON`, `XML`, `MsgPack`,
+`ProtoBuf`. For other formats -- `Query`, `Form`, `FormPost`, `FormMultipart` --
+`c.ShouldBind()` can be called multiple times without any damage to
+performance (see [#1341](https://github.com/gin-gonic/gin/pull/1341)); a sketch of the repeated-binding case follows.
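+
+For instance, a minimal sketch (the struct and handler names are illustrative) of binding the query string into two different structs in one handler:
+
+```go
+type queryA struct {
+	Foo string `form:"foo"`
+}
+
+type queryB struct {
+	Bar string `form:"bar"`
+}
+
+func SomeQueryHandler(c *gin.Context) {
+	a := queryA{}
+	b := queryB{}
+	// Query binding reads c.Request.URL.Query(), not the request body,
+	// so it is safe to bind repeatedly.
+	if err := c.ShouldBindQuery(&a); err == nil {
+		// use a.Foo
+	}
+	if err := c.ShouldBindQuery(&b); err == nil {
+		// use b.Bar
+	}
+}
+```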
+
+### HTTP/2 server push
+
+`http.Pusher` is supported only in **Go 1.8+**. See the [golang blog](https://blog.golang.org/h2push) for detailed information.
+
+```go
+package main
+
+import (
+ "html/template"
+ "log"
+
+ "github.com/gin-gonic/gin"
+)
+
+var html = template.Must(template.New("https").Parse(`
+<html>
+<head>
+  <title>Https Test</title>
+  <script src="/assets/app.js"></script>
+</head>
+<body>
+  <h1 style="color:red;">Welcome, Ginner!</h1>
+</body>
+</html>
+`))
+
+func main() {
+ r := gin.Default()
+ r.Static("/assets", "./assets")
+ r.SetHTMLTemplate(html)
+
+ r.GET("/", func(c *gin.Context) {
+ if pusher := c.Writer.Pusher(); pusher != nil {
+ // use pusher.Push() to do server push
+ if err := pusher.Push("/assets/app.js", nil); err != nil {
+ log.Printf("Failed to push: %v", err)
+ }
+ }
+ c.HTML(200, "https", gin.H{
+ "status": "success",
+ })
+ })
+
+	// Listen and serve on https://127.0.0.1:8080
+ r.RunTLS(":8080", "./testdata/server.pem", "./testdata/server.key")
+}
+```
+
+### Define format for the log of routes
+
+The default log of routes is:
+```
+[GIN-debug] POST /foo --> main.main.func1 (3 handlers)
+[GIN-debug] GET /bar --> main.main.func2 (3 handlers)
+[GIN-debug] GET /status --> main.main.func3 (3 handlers)
+```
+
+If you want to log this information in a given format (e.g. JSON, key-value pairs or something else), then you can define this format with `gin.DebugPrintRouteFunc`.
+In the example below, we log all routes with the standard log package, but you can use other log tools that suit your needs.
+```go
+import (
+ "log"
+ "net/http"
+
+ "github.com/gin-gonic/gin"
+)
+
+func main() {
+ r := gin.Default()
+ gin.DebugPrintRouteFunc = func(httpMethod, absolutePath, handlerName string, nuHandlers int) {
+ log.Printf("endpoint %v %v %v %v\n", httpMethod, absolutePath, handlerName, nuHandlers)
+ }
+
+ r.POST("/foo", func(c *gin.Context) {
+ c.JSON(http.StatusOK, "foo")
+ })
+
+ r.GET("/bar", func(c *gin.Context) {
+ c.JSON(http.StatusOK, "bar")
+ })
+
+ r.GET("/status", func(c *gin.Context) {
+ c.JSON(http.StatusOK, "ok")
+ })
+
+	// Listen and serve on http://0.0.0.0:8080
+ r.Run()
+}
+```
+
+### Set and get a cookie
+
+```go
+import (
+ "fmt"
+
+ "github.com/gin-gonic/gin"
+)
+
+func main() {
+
+ router := gin.Default()
+
+ router.GET("/cookie", func(c *gin.Context) {
+
+ cookie, err := c.Cookie("gin_cookie")
+
+ if err != nil {
+ cookie = "NotSet"
+ c.SetCookie("gin_cookie", "test", 3600, "/", "localhost", false, true)
+ }
+
+ fmt.Printf("Cookie value: %s \n", cookie)
+ })
+
+ router.Run()
+}
+```
+
+## Don't trust all proxies
+
+Gin lets you specify which headers hold the real client IP (if any),
+as well as which proxies (or direct clients) you trust to set
+one of those headers.
+
+The `TrustedProxies` slice on your `gin.Engine` specifies the network addresses or
+network CIDRs from which client-IP-related request headers can be
+trusted. They can be IPv4 addresses, IPv4 CIDRs, IPv6 addresses or
+IPv6 CIDRs.
+
+```go
+import (
+ "fmt"
+
+ "github.com/gin-gonic/gin"
+)
+
+func main() {
+
+ router := gin.Default()
+ router.TrustedProxies = []string{"192.168.1.2"}
+
+ router.GET("/", func(c *gin.Context) {
+ // If the client is 192.168.1.2, use the X-Forwarded-For
+ // header to deduce the original client IP from the trust-
+ // worthy parts of that header.
+ // Otherwise, simply return the direct client IP
+ fmt.Printf("ClientIP: %s\n", c.ClientIP())
+ })
+ router.Run()
+}
+```
+
+## Testing
+
+The `net/http/httptest` package is the preferred way for HTTP testing.
+
+```go
+package main
+
+func setupRouter() *gin.Engine {
+ r := gin.Default()
+ r.GET("/ping", func(c *gin.Context) {
+ c.String(200, "pong")
+ })
+ return r
+}
+
+func main() {
+ r := setupRouter()
+ r.Run(":8080")
+}
+```
+
+Test for code example above:
+
+```go
+package main
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestPingRoute(t *testing.T) {
+ router := setupRouter()
+
+ w := httptest.NewRecorder()
+ req, _ := http.NewRequest("GET", "/ping", nil)
+ router.ServeHTTP(w, req)
+
+ assert.Equal(t, 200, w.Code)
+ assert.Equal(t, "pong", w.Body.String())
+}
+```
+
+## Users
+
+Awesome projects using the [Gin](https://github.com/gin-gonic/gin) web framework:
+
+* [gorush](https://github.com/appleboy/gorush): A push notification server written in Go.
+* [fnproject](https://github.com/fnproject/fn): The container native, cloud agnostic serverless platform.
+* [photoprism](https://github.com/photoprism/photoprism): Personal photo management powered by Go and Google TensorFlow.
+* [krakend](https://github.com/devopsfaith/krakend): Ultra performant API Gateway with middlewares.
+* [picfit](https://github.com/thoas/picfit): An image resizing server written in Go.
+* [brigade](https://github.com/brigadecore/brigade): Event-based Scripting for Kubernetes.
+* [dkron](https://github.com/distribworks/dkron): Distributed, fault tolerant job scheduling system.
diff --git a/vendor/github.com/gin-gonic/gin/auth.go b/vendor/github.com/gin-gonic/gin/auth.go
new file mode 100644
index 0000000..4d8a6ce
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/auth.go
@@ -0,0 +1,91 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "crypto/subtle"
+ "encoding/base64"
+ "net/http"
+ "strconv"
+
+ "github.com/gin-gonic/gin/internal/bytesconv"
+)
+
+// AuthUserKey is the key used in the context to store the user credential in basic auth.
+const AuthUserKey = "user"
+
+// Accounts defines a key/value for user/pass list of authorized logins.
+type Accounts map[string]string
+
+type authPair struct {
+ value string
+ user string
+}
+
+type authPairs []authPair
+
+func (a authPairs) searchCredential(authValue string) (string, bool) {
+ if authValue == "" {
+ return "", false
+ }
+ for _, pair := range a {
+ if subtle.ConstantTimeCompare([]byte(pair.value), []byte(authValue)) == 1 {
+ return pair.user, true
+ }
+ }
+ return "", false
+}
+
+// BasicAuthForRealm returns a Basic HTTP Authorization middleware. It takes as arguments a map[string]string where
+// the key is the user name and the value is the password, as well as the name of the Realm.
+// If the realm is empty, "Authorization Required" will be used by default.
+// (see http://tools.ietf.org/html/rfc2617#section-1.2)
+func BasicAuthForRealm(accounts Accounts, realm string) HandlerFunc {
+ if realm == "" {
+ realm = "Authorization Required"
+ }
+ realm = "Basic realm=" + strconv.Quote(realm)
+ pairs := processAccounts(accounts)
+ return func(c *Context) {
+ // Search user in the slice of allowed credentials
+ user, found := pairs.searchCredential(c.requestHeader("Authorization"))
+ if !found {
+			// Credentials don't match; return 401 and abort the handlers chain.
+ c.Header("WWW-Authenticate", realm)
+ c.AbortWithStatus(http.StatusUnauthorized)
+ return
+ }
+
+		// The user credentials were found. Set the user's id to the key AuthUserKey in this
+		// context; it can be read later using c.MustGet(gin.AuthUserKey).
+ c.Set(AuthUserKey, user)
+ }
+}
+
+// BasicAuth returns a Basic HTTP Authorization middleware. It takes as argument a map[string]string where
+// the key is the user name and the value is the password.
+func BasicAuth(accounts Accounts) HandlerFunc {
+ return BasicAuthForRealm(accounts, "")
+}
+
+func processAccounts(accounts Accounts) authPairs {
+ length := len(accounts)
+ assert1(length > 0, "Empty list of authorized credentials")
+ pairs := make(authPairs, 0, length)
+ for user, password := range accounts {
+ assert1(user != "", "User can not be empty")
+ value := authorizationHeader(user, password)
+ pairs = append(pairs, authPair{
+ value: value,
+ user: user,
+ })
+ }
+ return pairs
+}
+
+func authorizationHeader(user, password string) string {
+ base := user + ":" + password
+ return "Basic " + base64.StdEncoding.EncodeToString(bytesconv.StringToBytes(base))
+}
diff --git a/vendor/github.com/gin-gonic/gin/binding/binding.go b/vendor/github.com/gin-gonic/gin/binding/binding.go
new file mode 100644
index 0000000..5caeb58
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/binding/binding.go
@@ -0,0 +1,118 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+//go:build !nomsgpack
+// +build !nomsgpack
+
+package binding
+
+import "net/http"
+
+// Content-Type MIME of the most common data formats.
+const (
+ MIMEJSON = "application/json"
+ MIMEHTML = "text/html"
+ MIMEXML = "application/xml"
+ MIMEXML2 = "text/xml"
+ MIMEPlain = "text/plain"
+ MIMEPOSTForm = "application/x-www-form-urlencoded"
+ MIMEMultipartPOSTForm = "multipart/form-data"
+ MIMEPROTOBUF = "application/x-protobuf"
+ MIMEMSGPACK = "application/x-msgpack"
+ MIMEMSGPACK2 = "application/msgpack"
+ MIMEYAML = "application/x-yaml"
+)
+
+// Binding describes the interface which needs to be implemented for binding the
+// data present in the request such as JSON request body, query parameters or
+// the form POST.
+type Binding interface {
+ Name() string
+ Bind(*http.Request, interface{}) error
+}
+
+// BindingBody adds a BindBody method to Binding. BindBody is similar to Bind,
+// but it reads the body from supplied bytes instead of req.Body.
+type BindingBody interface {
+ Binding
+ BindBody([]byte, interface{}) error
+}
+
+// BindingUri adds a BindUri method to Binding. BindUri is similar to Bind,
+// but it reads the Params.
+type BindingUri interface {
+ Name() string
+ BindUri(map[string][]string, interface{}) error
+}
+
+// StructValidator is the minimal interface which needs to be implemented in
+// order for it to be used as the validator engine for ensuring the correctness
+// of the request. Gin provides a default implementation for this using
+// https://github.com/go-playground/validator/tree/v8.18.2.
+type StructValidator interface {
+ // ValidateStruct can receive any kind of type and it should never panic, even if the configuration is not right.
+	// If the received type is a slice|array, the validation should be performed by traversing every element.
+ // If the received type is not a struct or slice|array, any validation should be skipped and nil must be returned.
+ // If the received type is a struct or pointer to a struct, the validation should be performed.
+ // If the struct is not valid or the validation itself fails, a descriptive error should be returned.
+ // Otherwise nil must be returned.
+ ValidateStruct(interface{}) error
+
+ // Engine returns the underlying validator engine which powers the
+ // StructValidator implementation.
+ Engine() interface{}
+}
+
+// Validator is the default validator which implements the StructValidator
+// interface. It uses https://github.com/go-playground/validator/tree/v8.18.2
+// under the hood.
+var Validator StructValidator = &defaultValidator{}
+
+// These implement the Binding interface and can be used to bind the data
+// present in the request to struct instances.
+var (
+ JSON = jsonBinding{}
+ XML = xmlBinding{}
+ Form = formBinding{}
+ Query = queryBinding{}
+ FormPost = formPostBinding{}
+ FormMultipart = formMultipartBinding{}
+ ProtoBuf = protobufBinding{}
+ MsgPack = msgpackBinding{}
+ YAML = yamlBinding{}
+ Uri = uriBinding{}
+ Header = headerBinding{}
+)
+
+// Default returns the appropriate Binding instance based on the HTTP method
+// and the content type.
+func Default(method, contentType string) Binding {
+ if method == http.MethodGet {
+ return Form
+ }
+
+ switch contentType {
+ case MIMEJSON:
+ return JSON
+ case MIMEXML, MIMEXML2:
+ return XML
+ case MIMEPROTOBUF:
+ return ProtoBuf
+ case MIMEMSGPACK, MIMEMSGPACK2:
+ return MsgPack
+ case MIMEYAML:
+ return YAML
+ case MIMEMultipartPOSTForm:
+ return FormMultipart
+ default: // case MIMEPOSTForm:
+ return Form
+ }
+}
+
+func validate(obj interface{}) error {
+ if Validator == nil {
+ return nil
+ }
+ return Validator.ValidateStruct(obj)
+}
diff --git a/vendor/github.com/gin-gonic/gin/binding/binding_nomsgpack.go b/vendor/github.com/gin-gonic/gin/binding/binding_nomsgpack.go
new file mode 100644
index 0000000..9afa3dc
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/binding/binding_nomsgpack.go
@@ -0,0 +1,112 @@
+// Copyright 2020 Gin Core Team. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+//go:build nomsgpack
+// +build nomsgpack
+
+package binding
+
+import "net/http"
+
+// Content-Type MIME of the most common data formats.
+const (
+ MIMEJSON = "application/json"
+ MIMEHTML = "text/html"
+ MIMEXML = "application/xml"
+ MIMEXML2 = "text/xml"
+ MIMEPlain = "text/plain"
+ MIMEPOSTForm = "application/x-www-form-urlencoded"
+ MIMEMultipartPOSTForm = "multipart/form-data"
+ MIMEPROTOBUF = "application/x-protobuf"
+ MIMEYAML = "application/x-yaml"
+)
+
+// Binding describes the interface which needs to be implemented for binding the
+// data present in the request such as JSON request body, query parameters or
+// the form POST.
+type Binding interface {
+ Name() string
+ Bind(*http.Request, interface{}) error
+}
+
+// BindingBody adds a BindBody method to Binding. BindBody is similar to Bind,
+// but it reads the body from supplied bytes instead of req.Body.
+type BindingBody interface {
+ Binding
+ BindBody([]byte, interface{}) error
+}
+
+// BindingUri adds a BindUri method to Binding. BindUri is similar to Bind,
+// but it reads the Params.
+type BindingUri interface {
+ Name() string
+ BindUri(map[string][]string, interface{}) error
+}
+
+// StructValidator is the minimal interface which needs to be implemented in
+// order for it to be used as the validator engine for ensuring the correctness
+// of the request. Gin provides a default implementation for this using
+// https://github.com/go-playground/validator/tree/v8.18.2.
+type StructValidator interface {
+ // ValidateStruct can receive any kind of type and it should never panic, even if the configuration is not right.
+ // If the received type is not a struct, any validation should be skipped and nil must be returned.
+ // If the received type is a struct or pointer to a struct, the validation should be performed.
+ // If the struct is not valid or the validation itself fails, a descriptive error should be returned.
+ // Otherwise nil must be returned.
+ ValidateStruct(interface{}) error
+
+ // Engine returns the underlying validator engine which powers the
+ // StructValidator implementation.
+ Engine() interface{}
+}
+
+// Validator is the default validator which implements the StructValidator
+// interface. It uses https://github.com/go-playground/validator/tree/v8.18.2
+// under the hood.
+var Validator StructValidator = &defaultValidator{}
+
+// These implement the Binding interface and can be used to bind the data
+// present in the request to struct instances.
+var (
+ JSON = jsonBinding{}
+ XML = xmlBinding{}
+ Form = formBinding{}
+ Query = queryBinding{}
+ FormPost = formPostBinding{}
+ FormMultipart = formMultipartBinding{}
+ ProtoBuf = protobufBinding{}
+ YAML = yamlBinding{}
+ Uri = uriBinding{}
+ Header = headerBinding{}
+)
+
+// Default returns the appropriate Binding instance based on the HTTP method
+// and the content type.
+func Default(method, contentType string) Binding {
+ if method == "GET" {
+ return Form
+ }
+
+ switch contentType {
+ case MIMEJSON:
+ return JSON
+ case MIMEXML, MIMEXML2:
+ return XML
+ case MIMEPROTOBUF:
+ return ProtoBuf
+ case MIMEYAML:
+ return YAML
+ case MIMEMultipartPOSTForm:
+ return FormMultipart
+ default: // case MIMEPOSTForm:
+ return Form
+ }
+}
+
+func validate(obj interface{}) error {
+ if Validator == nil {
+ return nil
+ }
+ return Validator.ValidateStruct(obj)
+}
diff --git a/vendor/github.com/gin-gonic/gin/binding/default_validator.go b/vendor/github.com/gin-gonic/gin/binding/default_validator.go
new file mode 100644
index 0000000..c57a120
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/binding/default_validator.go
@@ -0,0 +1,85 @@
+// Copyright 2017 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package binding
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+
+ "github.com/go-playground/validator/v10"
+)
+
+type defaultValidator struct {
+ once sync.Once
+ validate *validator.Validate
+}
+
+type sliceValidateError []error
+
+func (err sliceValidateError) Error() string {
+ var errMsgs []string
+ for i, e := range err {
+ if e == nil {
+ continue
+ }
+ errMsgs = append(errMsgs, fmt.Sprintf("[%d]: %s", i, e.Error()))
+ }
+ return strings.Join(errMsgs, "\n")
+}
+
+var _ StructValidator = &defaultValidator{}
+
+// ValidateStruct receives any kind of type, but validation is only performed on structs or pointers to structs.
+func (v *defaultValidator) ValidateStruct(obj interface{}) error {
+ if obj == nil {
+ return nil
+ }
+
+ value := reflect.ValueOf(obj)
+ switch value.Kind() {
+ case reflect.Ptr:
+ return v.ValidateStruct(value.Elem().Interface())
+ case reflect.Struct:
+ return v.validateStruct(obj)
+ case reflect.Slice, reflect.Array:
+ count := value.Len()
+ validateRet := make(sliceValidateError, 0)
+ for i := 0; i < count; i++ {
+ if err := v.ValidateStruct(value.Index(i).Interface()); err != nil {
+ validateRet = append(validateRet, err)
+ }
+ }
+ if len(validateRet) == 0 {
+ return nil
+ }
+ return validateRet
+ default:
+ return nil
+ }
+}
+
+// validateStruct receives struct type
+func (v *defaultValidator) validateStruct(obj interface{}) error {
+ v.lazyinit()
+ return v.validate.Struct(obj)
+}
+
+// Engine returns the underlying validator engine which powers the default
+// Validator instance. This is useful if you want to register custom validations
+// or struct level validations. See validator GoDoc for more info -
+// https://godoc.org/gopkg.in/go-playground/validator.v8
+func (v *defaultValidator) Engine() interface{} {
+ v.lazyinit()
+ return v.validate
+}
+
+func (v *defaultValidator) lazyinit() {
+ v.once.Do(func() {
+ v.validate = validator.New()
+ v.validate.SetTagName("binding")
+ })
+}
diff --git a/vendor/github.com/gin-gonic/gin/binding/form.go b/vendor/github.com/gin-gonic/gin/binding/form.go
new file mode 100644
index 0000000..b93c34c
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/binding/form.go
@@ -0,0 +1,63 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package binding
+
+import (
+ "net/http"
+)
+
+const defaultMemory = 32 << 20
+
+type formBinding struct{}
+type formPostBinding struct{}
+type formMultipartBinding struct{}
+
+func (formBinding) Name() string {
+ return "form"
+}
+
+func (formBinding) Bind(req *http.Request, obj interface{}) error {
+ if err := req.ParseForm(); err != nil {
+ return err
+ }
+ if err := req.ParseMultipartForm(defaultMemory); err != nil {
+ if err != http.ErrNotMultipart {
+ return err
+ }
+ }
+ if err := mapForm(obj, req.Form); err != nil {
+ return err
+ }
+ return validate(obj)
+}
+
+func (formPostBinding) Name() string {
+ return "form-urlencoded"
+}
+
+func (formPostBinding) Bind(req *http.Request, obj interface{}) error {
+ if err := req.ParseForm(); err != nil {
+ return err
+ }
+ if err := mapForm(obj, req.PostForm); err != nil {
+ return err
+ }
+ return validate(obj)
+}
+
+func (formMultipartBinding) Name() string {
+ return "multipart/form-data"
+}
+
+func (formMultipartBinding) Bind(req *http.Request, obj interface{}) error {
+ if err := req.ParseMultipartForm(defaultMemory); err != nil {
+ return err
+ }
+ if err := mappingByPtr(obj, (*multipartRequest)(req), "form"); err != nil {
+ return err
+ }
+
+ return validate(obj)
+}
diff --git a/vendor/github.com/gin-gonic/gin/binding/form_mapping.go b/vendor/github.com/gin-gonic/gin/binding/form_mapping.go
new file mode 100644
index 0000000..2f4e45b
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/binding/form_mapping.go
@@ -0,0 +1,392 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package binding
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/gin-gonic/gin/internal/bytesconv"
+ "github.com/gin-gonic/gin/internal/json"
+)
+
+var errUnknownType = errors.New("unknown type")
+
+func mapUri(ptr interface{}, m map[string][]string) error {
+ return mapFormByTag(ptr, m, "uri")
+}
+
+func mapForm(ptr interface{}, form map[string][]string) error {
+ return mapFormByTag(ptr, form, "form")
+}
+
+var emptyField = reflect.StructField{}
+
+func mapFormByTag(ptr interface{}, form map[string][]string, tag string) error {
+ // Check if ptr is a map
+ ptrVal := reflect.ValueOf(ptr)
+ var pointed interface{}
+ if ptrVal.Kind() == reflect.Ptr {
+ ptrVal = ptrVal.Elem()
+ pointed = ptrVal.Interface()
+ }
+ if ptrVal.Kind() == reflect.Map &&
+ ptrVal.Type().Key().Kind() == reflect.String {
+ if pointed != nil {
+ ptr = pointed
+ }
+ return setFormMap(ptr, form)
+ }
+
+ return mappingByPtr(ptr, formSource(form), tag)
+}
+
+// setter tries to set a value while walking the fields of a struct
+type setter interface {
+ TrySet(value reflect.Value, field reflect.StructField, key string, opt setOptions) (isSetted bool, err error)
+}
+
+type formSource map[string][]string
+
+var _ setter = formSource(nil)
+
+// TrySet tries to set a value by request's form source (like map[string][]string)
+func (form formSource) TrySet(value reflect.Value, field reflect.StructField, tagValue string, opt setOptions) (isSetted bool, err error) {
+ return setByForm(value, field, form, tagValue, opt)
+}
+
+func mappingByPtr(ptr interface{}, setter setter, tag string) error {
+ _, err := mapping(reflect.ValueOf(ptr), emptyField, setter, tag)
+ return err
+}
+
+func mapping(value reflect.Value, field reflect.StructField, setter setter, tag string) (bool, error) {
+ if field.Tag.Get(tag) == "-" { // just ignoring this field
+ return false, nil
+ }
+
+ var vKind = value.Kind()
+
+ if vKind == reflect.Ptr {
+ var isNew bool
+ vPtr := value
+ if value.IsNil() {
+ isNew = true
+ vPtr = reflect.New(value.Type().Elem())
+ }
+ isSetted, err := mapping(vPtr.Elem(), field, setter, tag)
+ if err != nil {
+ return false, err
+ }
+ if isNew && isSetted {
+ value.Set(vPtr)
+ }
+ return isSetted, nil
+ }
+
+ if vKind != reflect.Struct || !field.Anonymous {
+ ok, err := tryToSetValue(value, field, setter, tag)
+ if err != nil {
+ return false, err
+ }
+ if ok {
+ return true, nil
+ }
+ }
+
+ if vKind == reflect.Struct {
+ tValue := value.Type()
+
+ var isSetted bool
+ for i := 0; i < value.NumField(); i++ {
+ sf := tValue.Field(i)
+ if sf.PkgPath != "" && !sf.Anonymous { // unexported
+ continue
+ }
+ ok, err := mapping(value.Field(i), tValue.Field(i), setter, tag)
+ if err != nil {
+ return false, err
+ }
+ isSetted = isSetted || ok
+ }
+ return isSetted, nil
+ }
+ return false, nil
+}
+
+type setOptions struct {
+ isDefaultExists bool
+ defaultValue string
+}
+
+func tryToSetValue(value reflect.Value, field reflect.StructField, setter setter, tag string) (bool, error) {
+ var tagValue string
+ var setOpt setOptions
+
+ tagValue = field.Tag.Get(tag)
+ tagValue, opts := head(tagValue, ",")
+
+ if tagValue == "" { // default value is FieldName
+ tagValue = field.Name
+ }
+ if tagValue == "" { // when field is "emptyField" variable
+ return false, nil
+ }
+
+ var opt string
+ for len(opts) > 0 {
+ opt, opts = head(opts, ",")
+
+ if k, v := head(opt, "="); k == "default" {
+ setOpt.isDefaultExists = true
+ setOpt.defaultValue = v
+ }
+ }
+
+ return setter.TrySet(value, field, tagValue, setOpt)
+}
+
+func setByForm(value reflect.Value, field reflect.StructField, form map[string][]string, tagValue string, opt setOptions) (isSetted bool, err error) {
+ vs, ok := form[tagValue]
+ if !ok && !opt.isDefaultExists {
+ return false, nil
+ }
+
+ switch value.Kind() {
+ case reflect.Slice:
+ if !ok {
+ vs = []string{opt.defaultValue}
+ }
+ return true, setSlice(vs, value, field)
+ case reflect.Array:
+ if !ok {
+ vs = []string{opt.defaultValue}
+ }
+ if len(vs) != value.Len() {
+ return false, fmt.Errorf("%q is not valid value for %s", vs, value.Type().String())
+ }
+ return true, setArray(vs, value, field)
+ default:
+ var val string
+ if !ok {
+ val = opt.defaultValue
+ }
+
+ if len(vs) > 0 {
+ val = vs[0]
+ }
+ return true, setWithProperType(val, value, field)
+ }
+}
+
+func setWithProperType(val string, value reflect.Value, field reflect.StructField) error {
+ switch value.Kind() {
+ case reflect.Int:
+ return setIntField(val, 0, value)
+ case reflect.Int8:
+ return setIntField(val, 8, value)
+ case reflect.Int16:
+ return setIntField(val, 16, value)
+ case reflect.Int32:
+ return setIntField(val, 32, value)
+ case reflect.Int64:
+ switch value.Interface().(type) {
+ case time.Duration:
+ return setTimeDuration(val, value, field)
+ }
+ return setIntField(val, 64, value)
+ case reflect.Uint:
+ return setUintField(val, 0, value)
+ case reflect.Uint8:
+ return setUintField(val, 8, value)
+ case reflect.Uint16:
+ return setUintField(val, 16, value)
+ case reflect.Uint32:
+ return setUintField(val, 32, value)
+ case reflect.Uint64:
+ return setUintField(val, 64, value)
+ case reflect.Bool:
+ return setBoolField(val, value)
+ case reflect.Float32:
+ return setFloatField(val, 32, value)
+ case reflect.Float64:
+ return setFloatField(val, 64, value)
+ case reflect.String:
+ value.SetString(val)
+ case reflect.Struct:
+ switch value.Interface().(type) {
+ case time.Time:
+ return setTimeField(val, field, value)
+ }
+ return json.Unmarshal(bytesconv.StringToBytes(val), value.Addr().Interface())
+ case reflect.Map:
+ return json.Unmarshal(bytesconv.StringToBytes(val), value.Addr().Interface())
+ default:
+ return errUnknownType
+ }
+ return nil
+}
+
+func setIntField(val string, bitSize int, field reflect.Value) error {
+ if val == "" {
+ val = "0"
+ }
+ intVal, err := strconv.ParseInt(val, 10, bitSize)
+ if err == nil {
+ field.SetInt(intVal)
+ }
+ return err
+}
+
+func setUintField(val string, bitSize int, field reflect.Value) error {
+ if val == "" {
+ val = "0"
+ }
+ uintVal, err := strconv.ParseUint(val, 10, bitSize)
+ if err == nil {
+ field.SetUint(uintVal)
+ }
+ return err
+}
+
+func setBoolField(val string, field reflect.Value) error {
+ if val == "" {
+ val = "false"
+ }
+ boolVal, err := strconv.ParseBool(val)
+ if err == nil {
+ field.SetBool(boolVal)
+ }
+ return err
+}
+
+func setFloatField(val string, bitSize int, field reflect.Value) error {
+ if val == "" {
+ val = "0.0"
+ }
+ floatVal, err := strconv.ParseFloat(val, bitSize)
+ if err == nil {
+ field.SetFloat(floatVal)
+ }
+ return err
+}
+
+func setTimeField(val string, structField reflect.StructField, value reflect.Value) error {
+ timeFormat := structField.Tag.Get("time_format")
+ if timeFormat == "" {
+ timeFormat = time.RFC3339
+ }
+
+ switch tf := strings.ToLower(timeFormat); tf {
+ case "unix", "unixnano":
+ tv, err := strconv.ParseInt(val, 10, 64)
+ if err != nil {
+ return err
+ }
+
+ d := time.Duration(1)
+ if tf == "unixnano" {
+ d = time.Second
+ }
+
+ t := time.Unix(tv/int64(d), tv%int64(d))
+ value.Set(reflect.ValueOf(t))
+ return nil
+
+ }
+
+ if val == "" {
+ value.Set(reflect.ValueOf(time.Time{}))
+ return nil
+ }
+
+ l := time.Local
+ if isUTC, _ := strconv.ParseBool(structField.Tag.Get("time_utc")); isUTC {
+ l = time.UTC
+ }
+
+ if locTag := structField.Tag.Get("time_location"); locTag != "" {
+ loc, err := time.LoadLocation(locTag)
+ if err != nil {
+ return err
+ }
+ l = loc
+ }
+
+ t, err := time.ParseInLocation(timeFormat, val, l)
+ if err != nil {
+ return err
+ }
+
+ value.Set(reflect.ValueOf(t))
+ return nil
+}
+
+func setArray(vals []string, value reflect.Value, field reflect.StructField) error {
+ for i, s := range vals {
+ err := setWithProperType(s, value.Index(i), field)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func setSlice(vals []string, value reflect.Value, field reflect.StructField) error {
+ slice := reflect.MakeSlice(value.Type(), len(vals), len(vals))
+ err := setArray(vals, slice, field)
+ if err != nil {
+ return err
+ }
+ value.Set(slice)
+ return nil
+}
+
+func setTimeDuration(val string, value reflect.Value, field reflect.StructField) error {
+ d, err := time.ParseDuration(val)
+ if err != nil {
+ return err
+ }
+ value.Set(reflect.ValueOf(d))
+ return nil
+}
+
+func head(str, sep string) (head string, tail string) {
+ idx := strings.Index(str, sep)
+ if idx < 0 {
+ return str, ""
+ }
+ return str[:idx], str[idx+len(sep):]
+}
+
+func setFormMap(ptr interface{}, form map[string][]string) error {
+ el := reflect.TypeOf(ptr).Elem()
+
+ if el.Kind() == reflect.Slice {
+ ptrMap, ok := ptr.(map[string][]string)
+ if !ok {
+ return errors.New("cannot convert to map slices of strings")
+ }
+ for k, v := range form {
+ ptrMap[k] = v
+ }
+
+ return nil
+ }
+
+ ptrMap, ok := ptr.(map[string]string)
+ if !ok {
+ return errors.New("cannot convert to map of strings")
+ }
+ for k, v := range form {
+ ptrMap[k] = v[len(v)-1] // pick last
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/gin-gonic/gin/binding/header.go b/vendor/github.com/gin-gonic/gin/binding/header.go
new file mode 100644
index 0000000..179ce4e
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/binding/header.go
@@ -0,0 +1,34 @@
+package binding
+
+import (
+ "net/http"
+ "net/textproto"
+ "reflect"
+)
+
+type headerBinding struct{}
+
+func (headerBinding) Name() string {
+ return "header"
+}
+
+func (headerBinding) Bind(req *http.Request, obj interface{}) error {
+ if err := mapHeader(obj, req.Header); err != nil {
+ return err
+ }
+
+ return validate(obj)
+}
+
+func mapHeader(ptr interface{}, h map[string][]string) error {
+ return mappingByPtr(ptr, headerSource(h), "header")
+}
+
+type headerSource map[string][]string
+
+var _ setter = headerSource(nil)
+
+func (hs headerSource) TrySet(value reflect.Value, field reflect.StructField, tagValue string, opt setOptions) (isSetted bool, err error) {
+ return setByForm(value, field, hs, textproto.CanonicalMIMEHeaderKey(tagValue), opt)
+}
diff --git a/vendor/github.com/gin-gonic/gin/binding/json.go b/vendor/github.com/gin-gonic/gin/binding/json.go
new file mode 100644
index 0000000..d62e070
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/binding/json.go
@@ -0,0 +1,56 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package binding
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/gin-gonic/gin/internal/json"
+)
+
+// EnableDecoderUseNumber is used to call the UseNumber method on the JSON
+// Decoder instance. UseNumber causes the Decoder to unmarshal a number into an
+// interface{} as a Number instead of as a float64.
+var EnableDecoderUseNumber = false
+
+// EnableDecoderDisallowUnknownFields is used to call the DisallowUnknownFields method
+// on the JSON Decoder instance. DisallowUnknownFields causes the Decoder to
+// return an error when the destination is a struct and the input contains object
+// keys which do not match any non-ignored, exported fields in the destination.
+var EnableDecoderDisallowUnknownFields = false
+
+type jsonBinding struct{}
+
+func (jsonBinding) Name() string {
+ return "json"
+}
+
+func (jsonBinding) Bind(req *http.Request, obj interface{}) error {
+ if req == nil || req.Body == nil {
+ return fmt.Errorf("invalid request")
+ }
+ return decodeJSON(req.Body, obj)
+}
+
+func (jsonBinding) BindBody(body []byte, obj interface{}) error {
+ return decodeJSON(bytes.NewReader(body), obj)
+}
+
+func decodeJSON(r io.Reader, obj interface{}) error {
+ decoder := json.NewDecoder(r)
+ if EnableDecoderUseNumber {
+ decoder.UseNumber()
+ }
+ if EnableDecoderDisallowUnknownFields {
+ decoder.DisallowUnknownFields()
+ }
+ if err := decoder.Decode(obj); err != nil {
+ return err
+ }
+ return validate(obj)
+}
diff --git a/vendor/github.com/gin-gonic/gin/binding/msgpack.go b/vendor/github.com/gin-gonic/gin/binding/msgpack.go
new file mode 100644
index 0000000..2a44299
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/binding/msgpack.go
@@ -0,0 +1,38 @@
+// Copyright 2017 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+//go:build !nomsgpack
+// +build !nomsgpack
+
+package binding
+
+import (
+ "bytes"
+ "io"
+ "net/http"
+
+ "github.com/ugorji/go/codec"
+)
+
+type msgpackBinding struct{}
+
+func (msgpackBinding) Name() string {
+ return "msgpack"
+}
+
+func (msgpackBinding) Bind(req *http.Request, obj interface{}) error {
+ return decodeMsgPack(req.Body, obj)
+}
+
+func (msgpackBinding) BindBody(body []byte, obj interface{}) error {
+ return decodeMsgPack(bytes.NewReader(body), obj)
+}
+
+func decodeMsgPack(r io.Reader, obj interface{}) error {
+ cdc := new(codec.MsgpackHandle)
+ if err := codec.NewDecoder(r, cdc).Decode(&obj); err != nil {
+ return err
+ }
+ return validate(obj)
+}
diff --git a/vendor/github.com/gin-gonic/gin/binding/multipart_form_mapping.go b/vendor/github.com/gin-gonic/gin/binding/multipart_form_mapping.go
new file mode 100644
index 0000000..f85a1aa
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/binding/multipart_form_mapping.go
@@ -0,0 +1,66 @@
+// Copyright 2019 Gin Core Team. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package binding
+
+import (
+ "errors"
+ "mime/multipart"
+ "net/http"
+ "reflect"
+)
+
+type multipartRequest http.Request
+
+var _ setter = (*multipartRequest)(nil)
+
+// TrySet tries to set a value from the multipart request, binding a form file when one is present
+func (r *multipartRequest) TrySet(value reflect.Value, field reflect.StructField, key string, opt setOptions) (isSetted bool, err error) {
+ if files := r.MultipartForm.File[key]; len(files) != 0 {
+ return setByMultipartFormFile(value, field, files)
+ }
+
+ return setByForm(value, field, r.MultipartForm.Value, key, opt)
+}
+
+func setByMultipartFormFile(value reflect.Value, field reflect.StructField, files []*multipart.FileHeader) (isSetted bool, err error) {
+ switch value.Kind() {
+ case reflect.Ptr:
+ switch value.Interface().(type) {
+ case *multipart.FileHeader:
+ value.Set(reflect.ValueOf(files[0]))
+ return true, nil
+ }
+ case reflect.Struct:
+ switch value.Interface().(type) {
+ case multipart.FileHeader:
+ value.Set(reflect.ValueOf(*files[0]))
+ return true, nil
+ }
+ case reflect.Slice:
+ slice := reflect.MakeSlice(value.Type(), len(files), len(files))
+ isSetted, err = setArrayOfMultipartFormFiles(slice, field, files)
+ if err != nil || !isSetted {
+ return isSetted, err
+ }
+ value.Set(slice)
+ return true, nil
+ case reflect.Array:
+ return setArrayOfMultipartFormFiles(value, field, files)
+ }
+ return false, errors.New("unsupported field type for multipart.FileHeader")
+}
+
+func setArrayOfMultipartFormFiles(value reflect.Value, field reflect.StructField, files []*multipart.FileHeader) (isSetted bool, err error) {
+ if value.Len() != len(files) {
+ return false, errors.New("unsupported len of array for []*multipart.FileHeader")
+ }
+ for i := range files {
+ setted, err := setByMultipartFormFile(value.Index(i), field, files[i:i+1])
+ if err != nil || !setted {
+ return setted, err
+ }
+ }
+ return true, nil
+}
diff --git a/vendor/github.com/gin-gonic/gin/binding/protobuf.go b/vendor/github.com/gin-gonic/gin/binding/protobuf.go
new file mode 100644
index 0000000..f9ece92
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/binding/protobuf.go
@@ -0,0 +1,36 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package binding
+
+import (
+ "io/ioutil"
+ "net/http"
+
+ "github.com/golang/protobuf/proto"
+)
+
+type protobufBinding struct{}
+
+func (protobufBinding) Name() string {
+ return "protobuf"
+}
+
+func (b protobufBinding) Bind(req *http.Request, obj interface{}) error {
+ buf, err := ioutil.ReadAll(req.Body)
+ if err != nil {
+ return err
+ }
+ return b.BindBody(buf, obj)
+}
+
+func (protobufBinding) BindBody(body []byte, obj interface{}) error {
+ if err := proto.Unmarshal(body, obj.(proto.Message)); err != nil {
+ return err
+ }
+	// Here it would be the same to return validate(obj), but for now we can't add
+	// `binding:""` tags to the structs that are automatically generated by gen-proto
+ return nil
+ // return validate(obj)
+}
diff --git a/vendor/github.com/gin-gonic/gin/binding/query.go b/vendor/github.com/gin-gonic/gin/binding/query.go
new file mode 100644
index 0000000..219743f
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/binding/query.go
@@ -0,0 +1,21 @@
+// Copyright 2017 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package binding
+
+import "net/http"
+
+type queryBinding struct{}
+
+func (queryBinding) Name() string {
+ return "query"
+}
+
+func (queryBinding) Bind(req *http.Request, obj interface{}) error {
+ values := req.URL.Query()
+ if err := mapForm(obj, values); err != nil {
+ return err
+ }
+ return validate(obj)
+}
diff --git a/vendor/github.com/gin-gonic/gin/binding/uri.go b/vendor/github.com/gin-gonic/gin/binding/uri.go
new file mode 100644
index 0000000..f91ec38
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/binding/uri.go
@@ -0,0 +1,18 @@
+// Copyright 2018 Gin Core Team. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package binding
+
+type uriBinding struct{}
+
+func (uriBinding) Name() string {
+ return "uri"
+}
+
+func (uriBinding) BindUri(m map[string][]string, obj interface{}) error {
+ if err := mapUri(obj, m); err != nil {
+ return err
+ }
+ return validate(obj)
+}
diff --git a/vendor/github.com/gin-gonic/gin/binding/xml.go b/vendor/github.com/gin-gonic/gin/binding/xml.go
new file mode 100644
index 0000000..4e90114
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/binding/xml.go
@@ -0,0 +1,33 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package binding
+
+import (
+ "bytes"
+ "encoding/xml"
+ "io"
+ "net/http"
+)
+
+type xmlBinding struct{}
+
+func (xmlBinding) Name() string {
+ return "xml"
+}
+
+func (xmlBinding) Bind(req *http.Request, obj interface{}) error {
+ return decodeXML(req.Body, obj)
+}
+
+func (xmlBinding) BindBody(body []byte, obj interface{}) error {
+ return decodeXML(bytes.NewReader(body), obj)
+}
+
+func decodeXML(r io.Reader, obj interface{}) error {
+ decoder := xml.NewDecoder(r)
+ if err := decoder.Decode(obj); err != nil {
+ return err
+ }
+ return validate(obj)
+}
diff --git a/vendor/github.com/gin-gonic/gin/binding/yaml.go b/vendor/github.com/gin-gonic/gin/binding/yaml.go
new file mode 100644
index 0000000..a2d36d6
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/binding/yaml.go
@@ -0,0 +1,35 @@
+// Copyright 2018 Gin Core Team. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package binding
+
+import (
+ "bytes"
+ "io"
+ "net/http"
+
+ "gopkg.in/yaml.v2"
+)
+
+type yamlBinding struct{}
+
+func (yamlBinding) Name() string {
+ return "yaml"
+}
+
+func (yamlBinding) Bind(req *http.Request, obj interface{}) error {
+ return decodeYAML(req.Body, obj)
+}
+
+func (yamlBinding) BindBody(body []byte, obj interface{}) error {
+ return decodeYAML(bytes.NewReader(body), obj)
+}
+
+func decodeYAML(r io.Reader, obj interface{}) error {
+ decoder := yaml.NewDecoder(r)
+ if err := decoder.Decode(obj); err != nil {
+ return err
+ }
+ return validate(obj)
+}
diff --git a/vendor/github.com/gin-gonic/gin/codecov.yml b/vendor/github.com/gin-gonic/gin/codecov.yml
new file mode 100644
index 0000000..c9c9a52
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/codecov.yml
@@ -0,0 +1,5 @@
+coverage:
+ notify:
+ gitter:
+ default:
+ url: https://webhooks.gitter.im/e/d90dcdeeab2f1e357165
diff --git a/vendor/github.com/gin-gonic/gin/context.go b/vendor/github.com/gin-gonic/gin/context.go
new file mode 100644
index 0000000..dc03c35
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/context.go
@@ -0,0 +1,1178 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "mime/multipart"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/gin-contrib/sse"
+ "github.com/gin-gonic/gin/binding"
+ "github.com/gin-gonic/gin/render"
+)
+
+// Content-Type MIME of the most common data formats.
+const (
+ MIMEJSON = binding.MIMEJSON
+ MIMEHTML = binding.MIMEHTML
+ MIMEXML = binding.MIMEXML
+ MIMEXML2 = binding.MIMEXML2
+ MIMEPlain = binding.MIMEPlain
+ MIMEPOSTForm = binding.MIMEPOSTForm
+ MIMEMultipartPOSTForm = binding.MIMEMultipartPOSTForm
+ MIMEYAML = binding.MIMEYAML
+)
+
+// BodyBytesKey indicates a default body bytes key.
+const BodyBytesKey = "_gin-gonic/gin/bodybyteskey"
+
+const abortIndex int8 = math.MaxInt8 / 2
+
+// Context is the most important part of gin. It allows us to pass variables between middleware,
+// manage the flow, validate the JSON of a request and render a JSON response for example.
+type Context struct {
+ writermem responseWriter
+ Request *http.Request
+ Writer ResponseWriter
+
+ Params Params
+ handlers HandlersChain
+ index int8
+ fullPath string
+
+ engine *Engine
+ params *Params
+
+	// This mutex protects the Keys map.
+ mu sync.RWMutex
+
+ // Keys is a key/value pair exclusively for the context of each request.
+ Keys map[string]interface{}
+
+ // Errors is a list of errors attached by all the handlers/middlewares that used this context.
+ Errors errorMsgs
+
+ // Accepted defines a list of manually accepted formats for content negotiation.
+ Accepted []string
+
+ // queryCache caches the query result from c.Request.URL.Query().
+ queryCache url.Values
+
+ // formCache caches c.Request.PostForm, which contains the parsed form data from POST, PATCH,
+ // or PUT body parameters.
+ formCache url.Values
+
+ // SameSite allows a server to define a cookie attribute making it impossible for
+ // the browser to send this cookie along with cross-site requests.
+ sameSite http.SameSite
+}
+
+/************************************/
+/********** CONTEXT CREATION ********/
+/************************************/
+
+func (c *Context) reset() {
+ c.Writer = &c.writermem
+ c.Params = c.Params[0:0]
+ c.handlers = nil
+ c.index = -1
+
+ c.fullPath = ""
+ c.Keys = nil
+ c.Errors = c.Errors[0:0]
+ c.Accepted = nil
+ c.queryCache = nil
+ c.formCache = nil
+ *c.params = (*c.params)[0:0]
+}
+
+// Copy returns a copy of the current context that can be safely used outside the request's scope.
+// This has to be used when the context has to be passed to a goroutine.
+func (c *Context) Copy() *Context {
+ cp := Context{
+ writermem: c.writermem,
+ Request: c.Request,
+ Params: c.Params,
+ engine: c.engine,
+ }
+ cp.writermem.ResponseWriter = nil
+ cp.Writer = &cp.writermem
+ cp.index = abortIndex
+ cp.handlers = nil
+ cp.Keys = map[string]interface{}{}
+ for k, v := range c.Keys {
+ cp.Keys[k] = v
+ }
+ paramCopy := make([]Param, len(cp.Params))
+ copy(paramCopy, cp.Params)
+ cp.Params = paramCopy
+ return &cp
+}
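+
+// Editor's sketch (not upstream code; the route and the timing are assumptions):
+// Copy is the safe way to hand the context to a goroutine that outlives the handler:
+//
+//    router.GET("/long", func(c *gin.Context) {
+//        cp := c.Copy()
+//        go func() {
+//            time.Sleep(5 * time.Second)                 // simulate slow background work
+//            log.Println("done: " + cp.Request.URL.Path) // cp is safe here, c is not
+//        }()
+//        c.String(http.StatusOK, "job started")
+//    })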
+
+// HandlerName returns the main handler's name. For example if the handler is "handleGetUsers()",
+// this function will return "main.handleGetUsers".
+func (c *Context) HandlerName() string {
+ return nameOfFunction(c.handlers.Last())
+}
+
+// HandlerNames returns a list of all registered handlers for this context in descending order,
+// following the semantics of HandlerName()
+func (c *Context) HandlerNames() []string {
+ hn := make([]string, 0, len(c.handlers))
+ for _, val := range c.handlers {
+ hn = append(hn, nameOfFunction(val))
+ }
+ return hn
+}
+
+// Handler returns the main handler.
+func (c *Context) Handler() HandlerFunc {
+ return c.handlers.Last()
+}
+
+// FullPath returns the matched route's full path. For routes that are
+// not found, it returns an empty string.
+// router.GET("/user/:id", func(c *gin.Context) {
+// c.FullPath() == "/user/:id" // true
+// })
+func (c *Context) FullPath() string {
+ return c.fullPath
+}
+
+/************************************/
+/*********** FLOW CONTROL ***********/
+/************************************/
+
+// Next should be used only inside middleware.
+// It executes the pending handlers in the chain inside the calling handler.
+// See example in GitHub.
+func (c *Context) Next() {
+ c.index++
+ for c.index < int8(len(c.handlers)) {
+ c.handlers[c.index](c)
+ c.index++
+ }
+}
+
+// IsAborted returns true if the current context was aborted.
+func (c *Context) IsAborted() bool {
+ return c.index >= abortIndex
+}
+
+// Abort prevents pending handlers from being called. Note that this will not stop the current handler.
+// Let's say you have an authorization middleware that validates that the current request is authorized.
+// If the authorization fails (ex: the password does not match), call Abort to ensure the remaining handlers
+// for this request are not called.
+func (c *Context) Abort() {
+ c.index = abortIndex
+}
+
+// AbortWithStatus calls `Abort()` and writes the headers with the specified status code.
+// For example, a failed attempt to authenticate a request could use: context.AbortWithStatus(401).
+func (c *Context) AbortWithStatus(code int) {
+ c.Status(code)
+ c.Writer.WriteHeaderNow()
+ c.Abort()
+}
+
+// AbortWithStatusJSON calls `Abort()` and then `JSON` internally.
+// This method stops the chain, writes the status code and returns a JSON body.
+// It also sets the Content-Type as "application/json".
+func (c *Context) AbortWithStatusJSON(code int, jsonObj interface{}) {
+ c.Abort()
+ c.JSON(code, jsonObj)
+}
+
+// AbortWithError calls `AbortWithStatus()` and `Error()` internally.
+// This method stops the chain, writes the status code and pushes the specified error to `c.Errors`.
+// See Context.Error() for more details.
+func (c *Context) AbortWithError(code int, err error) *Error {
+ c.AbortWithStatus(code)
+ return c.Error(err)
+}
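+
+// Editor's sketch (not upstream code; the header name and token are assumptions):
+// a typical authentication middleware built on the abort helpers above:
+//
+//    func AuthRequired() gin.HandlerFunc {
+//        return func(c *gin.Context) {
+//            if c.GetHeader("X-Api-Token") != "secret" {
+//                c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "unauthorized"})
+//                return // Abort stops the pending handlers, not the current one
+//            }
+//            c.Next()
+//        }
+//    }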
+
+/************************************/
+/********* ERROR MANAGEMENT *********/
+/************************************/
+
+// Error attaches an error to the current context. The error is pushed to a list of errors.
+// It's a good idea to call Error for each error that occurred during the resolution of a request.
+// A middleware can be used to collect all the errors and push them to a database together,
+// print a log, or append it in the HTTP response.
+// Error will panic if err is nil.
+func (c *Context) Error(err error) *Error {
+ if err == nil {
+ panic("err is nil")
+ }
+
+ parsedError, ok := err.(*Error)
+ if !ok {
+ parsedError = &Error{
+ Err: err,
+ Type: ErrorTypePrivate,
+ }
+ }
+
+ c.Errors = append(c.Errors, parsedError)
+ return parsedError
+}
+
+/************************************/
+/******** METADATA MANAGEMENT********/
+/************************************/
+
+// Set is used to store a new key/value pair exclusively for this context.
+// It also lazily initializes c.Keys if it was not used previously.
+func (c *Context) Set(key string, value interface{}) {
+ c.mu.Lock()
+ if c.Keys == nil {
+ c.Keys = make(map[string]interface{})
+ }
+
+ c.Keys[key] = value
+ c.mu.Unlock()
+}
+
+// Get returns the value for the given key, i.e. (value, true).
+// If the value does not exist, it returns (nil, false).
+func (c *Context) Get(key string) (value interface{}, exists bool) {
+ c.mu.RLock()
+ value, exists = c.Keys[key]
+ c.mu.RUnlock()
+ return
+}
+
+// MustGet returns the value for the given key if it exists, otherwise it panics.
+func (c *Context) MustGet(key string) interface{} {
+ if value, exists := c.Get(key); exists {
+ return value
+ }
+ panic("Key \"" + key + "\" does not exist")
+}
+
+// GetString returns the value associated with the key as a string.
+func (c *Context) GetString(key string) (s string) {
+ if val, ok := c.Get(key); ok && val != nil {
+ s, _ = val.(string)
+ }
+ return
+}
+
+// GetBool returns the value associated with the key as a boolean.
+func (c *Context) GetBool(key string) (b bool) {
+ if val, ok := c.Get(key); ok && val != nil {
+ b, _ = val.(bool)
+ }
+ return
+}
+
+// GetInt returns the value associated with the key as an integer.
+func (c *Context) GetInt(key string) (i int) {
+ if val, ok := c.Get(key); ok && val != nil {
+ i, _ = val.(int)
+ }
+ return
+}
+
+// GetInt64 returns the value associated with the key as an int64.
+func (c *Context) GetInt64(key string) (i64 int64) {
+ if val, ok := c.Get(key); ok && val != nil {
+ i64, _ = val.(int64)
+ }
+ return
+}
+
+// GetUint returns the value associated with the key as an unsigned integer.
+func (c *Context) GetUint(key string) (ui uint) {
+ if val, ok := c.Get(key); ok && val != nil {
+ ui, _ = val.(uint)
+ }
+ return
+}
+
+// GetUint64 returns the value associated with the key as a uint64.
+func (c *Context) GetUint64(key string) (ui64 uint64) {
+ if val, ok := c.Get(key); ok && val != nil {
+ ui64, _ = val.(uint64)
+ }
+ return
+}
+
+// GetFloat64 returns the value associated with the key as a float64.
+func (c *Context) GetFloat64(key string) (f64 float64) {
+ if val, ok := c.Get(key); ok && val != nil {
+ f64, _ = val.(float64)
+ }
+ return
+}
+
+// GetTime returns the value associated with the key as time.
+func (c *Context) GetTime(key string) (t time.Time) {
+ if val, ok := c.Get(key); ok && val != nil {
+ t, _ = val.(time.Time)
+ }
+ return
+}
+
+// GetDuration returns the value associated with the key as a duration.
+func (c *Context) GetDuration(key string) (d time.Duration) {
+ if val, ok := c.Get(key); ok && val != nil {
+ d, _ = val.(time.Duration)
+ }
+ return
+}
+
+// GetStringSlice returns the value associated with the key as a slice of strings.
+func (c *Context) GetStringSlice(key string) (ss []string) {
+ if val, ok := c.Get(key); ok && val != nil {
+ ss, _ = val.([]string)
+ }
+ return
+}
+
+// GetStringMap returns the value associated with the key as a map of interfaces.
+func (c *Context) GetStringMap(key string) (sm map[string]interface{}) {
+ if val, ok := c.Get(key); ok && val != nil {
+ sm, _ = val.(map[string]interface{})
+ }
+ return
+}
+
+// GetStringMapString returns the value associated with the key as a map of strings.
+func (c *Context) GetStringMapString(key string) (sms map[string]string) {
+ if val, ok := c.Get(key); ok && val != nil {
+ sms, _ = val.(map[string]string)
+ }
+ return
+}
+
+// GetStringMapStringSlice returns the value associated with the key as a map to a slice of strings.
+func (c *Context) GetStringMapStringSlice(key string) (smss map[string][]string) {
+ if val, ok := c.Get(key); ok && val != nil {
+ smss, _ = val.(map[string][]string)
+ }
+ return
+}
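+
+// Editor's sketch (key names are assumptions): the typed getters above return
+// the zero value when the key is missing or holds a value of a different type:
+//
+//    c.Set("user", "alice")
+//    c.Set("timeout", 5*time.Second)
+//    name := c.GetString("user")   // "alice"
+//    d := c.GetDuration("timeout") // 5s
+//    n := c.GetInt("user")         // 0: the value exists but is not an int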
+
+/************************************/
+/************ INPUT DATA ************/
+/************************************/
+
+// Param returns the value of the URL param.
+// It is a shortcut for c.Params.ByName(key)
+// router.GET("/user/:id", func(c *gin.Context) {
+// // a GET request to /user/john
+// id := c.Param("id") // id == "john"
+// })
+func (c *Context) Param(key string) string {
+ return c.Params.ByName(key)
+}
+
+// Query returns the keyed url query value if it exists,
+// otherwise it returns an empty string `("")`.
+// It is a shortcut for `c.Request.URL.Query().Get(key)`
+// GET /path?id=1234&name=Manu&value=
+// c.Query("id") == "1234"
+// c.Query("name") == "Manu"
+// c.Query("value") == ""
+// c.Query("wtf") == ""
+func (c *Context) Query(key string) string {
+ value, _ := c.GetQuery(key)
+ return value
+}
+
+// DefaultQuery returns the keyed url query value if it exists,
+// otherwise it returns the specified defaultValue string.
+// See: Query() and GetQuery() for further information.
+// GET /?name=Manu&lastname=
+// c.DefaultQuery("name", "unknown") == "Manu"
+// c.DefaultQuery("id", "none") == "none"
+// c.DefaultQuery("lastname", "none") == ""
+func (c *Context) DefaultQuery(key, defaultValue string) string {
+ if value, ok := c.GetQuery(key); ok {
+ return value
+ }
+ return defaultValue
+}
+
+// GetQuery is like Query(), it returns the keyed url query value
+// if it exists `(value, true)` (even when the value is an empty string),
+// otherwise it returns `("", false)`.
+// It is a shortcut for `c.Request.URL.Query().Get(key)`
+// GET /?name=Manu&lastname=
+// ("Manu", true) == c.GetQuery("name")
+// ("", false) == c.GetQuery("id")
+// ("", true) == c.GetQuery("lastname")
+func (c *Context) GetQuery(key string) (string, bool) {
+ if values, ok := c.GetQueryArray(key); ok {
+ return values[0], ok
+ }
+ return "", false
+}
+
+// QueryArray returns a slice of strings for a given query key.
+// The length of the slice depends on the number of params with the given key.
+func (c *Context) QueryArray(key string) []string {
+ values, _ := c.GetQueryArray(key)
+ return values
+}
+
+func (c *Context) initQueryCache() {
+ if c.queryCache == nil {
+ if c.Request != nil {
+ c.queryCache = c.Request.URL.Query()
+ } else {
+ c.queryCache = url.Values{}
+ }
+ }
+}
+
+// GetQueryArray returns a slice of strings for a given query key, plus
+// a boolean value whether at least one value exists for the given key.
+func (c *Context) GetQueryArray(key string) ([]string, bool) {
+ c.initQueryCache()
+ if values, ok := c.queryCache[key]; ok && len(values) > 0 {
+ return values, true
+ }
+ return []string{}, false
+}
+
+// QueryMap returns a map for a given query key.
+func (c *Context) QueryMap(key string) map[string]string {
+ dicts, _ := c.GetQueryMap(key)
+ return dicts
+}
+
+// GetQueryMap returns a map for a given query key, plus a boolean value
+// whether at least one value exists for the given key.
+func (c *Context) GetQueryMap(key string) (map[string]string, bool) {
+ c.initQueryCache()
+ return c.get(c.queryCache, key)
+}
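+
+// Editor's sketch (the request is an assumption): map-style query parameters
+// use the key[subkey]=value form parsed by the internal get() helper below:
+//
+//    // GET /post?ids[a]=1234&ids[b]=hello
+//    router.GET("/post", func(c *gin.Context) {
+//        ids := c.QueryMap("ids") // map[a:1234 b:hello]
+//        c.JSON(http.StatusOK, ids)
+//    })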
+
+// PostForm returns the specified key from a POST urlencoded form or multipart form
+// when it exists, otherwise it returns an empty string `("")`.
+func (c *Context) PostForm(key string) string {
+ value, _ := c.GetPostForm(key)
+ return value
+}
+
+// DefaultPostForm returns the specified key from a POST urlencoded form or multipart form
+// when it exists, otherwise it returns the specified defaultValue string.
+// See: PostForm() and GetPostForm() for further information.
+func (c *Context) DefaultPostForm(key, defaultValue string) string {
+ if value, ok := c.GetPostForm(key); ok {
+ return value
+ }
+ return defaultValue
+}
+
+// GetPostForm is like PostForm(key). It returns the specified key from a POST urlencoded
+// form or multipart form when it exists `(value, true)` (even when the value is an empty string),
+// otherwise it returns ("", false).
+// For example, during a PATCH request to update the user's email:
+// email=mail@example.com --> ("mail@example.com", true) := GetPostForm("email") // set email to "mail@example.com"
+// email= --> ("", true) := GetPostForm("email") // set email to ""
+// --> ("", false) := GetPostForm("email") // do nothing with email
+func (c *Context) GetPostForm(key string) (string, bool) {
+ if values, ok := c.GetPostFormArray(key); ok {
+ return values[0], ok
+ }
+ return "", false
+}
+
+// PostFormArray returns a slice of strings for a given form key.
+// The length of the slice depends on the number of params with the given key.
+func (c *Context) PostFormArray(key string) []string {
+ values, _ := c.GetPostFormArray(key)
+ return values
+}
+
+func (c *Context) initFormCache() {
+ if c.formCache == nil {
+ c.formCache = make(url.Values)
+ req := c.Request
+ if err := req.ParseMultipartForm(c.engine.MaxMultipartMemory); err != nil {
+ if err != http.ErrNotMultipart {
+ debugPrint("error on parse multipart form array: %v", err)
+ }
+ }
+ c.formCache = req.PostForm
+ }
+}
+
+// GetPostFormArray returns a slice of strings for a given form key, plus
+// a boolean value whether at least one value exists for the given key.
+func (c *Context) GetPostFormArray(key string) ([]string, bool) {
+ c.initFormCache()
+ if values := c.formCache[key]; len(values) > 0 {
+ return values, true
+ }
+ return []string{}, false
+}
+
+// PostFormMap returns a map for a given form key.
+func (c *Context) PostFormMap(key string) map[string]string {
+ dicts, _ := c.GetPostFormMap(key)
+ return dicts
+}
+
+// GetPostFormMap returns a map for a given form key, plus a boolean value
+// whether at least one value exists for the given key.
+func (c *Context) GetPostFormMap(key string) (map[string]string, bool) {
+ c.initFormCache()
+ return c.get(c.formCache, key)
+}
+
+// get is an internal method that collects entries of the form key[subkey]=value into a map.
+func (c *Context) get(m map[string][]string, key string) (map[string]string, bool) {
+ dicts := make(map[string]string)
+ exist := false
+ for k, v := range m {
+ if i := strings.IndexByte(k, '['); i >= 1 && k[0:i] == key {
+ if j := strings.IndexByte(k[i+1:], ']'); j >= 1 {
+ exist = true
+ dicts[k[i+1:][:j]] = v[0]
+ }
+ }
+ }
+ return dicts, exist
+}
+
+// FormFile returns the first file for the provided form key.
+func (c *Context) FormFile(name string) (*multipart.FileHeader, error) {
+ if c.Request.MultipartForm == nil {
+ if err := c.Request.ParseMultipartForm(c.engine.MaxMultipartMemory); err != nil {
+ return nil, err
+ }
+ }
+ f, fh, err := c.Request.FormFile(name)
+ if err != nil {
+ return nil, err
+ }
+ f.Close()
+ return fh, err
+}
+
+// MultipartForm is the parsed multipart form, including file uploads.
+func (c *Context) MultipartForm() (*multipart.Form, error) {
+ err := c.Request.ParseMultipartForm(c.engine.MaxMultipartMemory)
+ return c.Request.MultipartForm, err
+}
+
+// SaveUploadedFile uploads the form file to the specified dst.
+func (c *Context) SaveUploadedFile(file *multipart.FileHeader, dst string) error {
+ src, err := file.Open()
+ if err != nil {
+ return err
+ }
+ defer src.Close()
+
+ out, err := os.Create(dst)
+ if err != nil {
+ return err
+ }
+ defer out.Close()
+
+ _, err = io.Copy(out, src)
+ return err
+}
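+
+// Editor's sketch (form field name and destination path are assumptions):
+// the usual upload flow combines FormFile and SaveUploadedFile:
+//
+//    router.POST("/upload", func(c *gin.Context) {
+//        file, err := c.FormFile("file")
+//        if err != nil {
+//            c.String(http.StatusBadRequest, "get form err: %s", err.Error())
+//            return
+//        }
+//        if err := c.SaveUploadedFile(file, "/tmp/"+file.Filename); err != nil {
+//            c.String(http.StatusInternalServerError, "save err: %s", err.Error())
+//            return
+//        }
+//        c.String(http.StatusOK, "uploaded %s", file.Filename)
+//    })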
+
+// Bind checks the Content-Type to select a binding engine automatically.
+// Depending on the "Content-Type" header, different bindings are used:
+//     "application/json" --> JSON binding
+//     "application/xml"  --> XML binding
+//     otherwise          --> returns an error.
+// It parses the request's body as JSON if Content-Type == "application/json", or as XML if Content-Type == "application/xml".
+// It decodes the payload into the struct specified as a pointer.
+// It writes a 400 error and sets the Content-Type header to "text/plain" in the response if the input is not valid.
+func (c *Context) Bind(obj interface{}) error {
+ b := binding.Default(c.Request.Method, c.ContentType())
+ return c.MustBindWith(obj, b)
+}
+
+// BindJSON is a shortcut for c.MustBindWith(obj, binding.JSON).
+func (c *Context) BindJSON(obj interface{}) error {
+ return c.MustBindWith(obj, binding.JSON)
+}
+
+// BindXML is a shortcut for c.MustBindWith(obj, binding.XML).
+func (c *Context) BindXML(obj interface{}) error {
+ return c.MustBindWith(obj, binding.XML)
+}
+
+// BindQuery is a shortcut for c.MustBindWith(obj, binding.Query).
+func (c *Context) BindQuery(obj interface{}) error {
+ return c.MustBindWith(obj, binding.Query)
+}
+
+// BindYAML is a shortcut for c.MustBindWith(obj, binding.YAML).
+func (c *Context) BindYAML(obj interface{}) error {
+ return c.MustBindWith(obj, binding.YAML)
+}
+
+// BindHeader is a shortcut for c.MustBindWith(obj, binding.Header).
+func (c *Context) BindHeader(obj interface{}) error {
+ return c.MustBindWith(obj, binding.Header)
+}
+
+// BindUri binds the passed struct pointer using binding.Uri.
+// It will abort the request with HTTP 400 if any error occurs.
+func (c *Context) BindUri(obj interface{}) error {
+ if err := c.ShouldBindUri(obj); err != nil {
+ c.AbortWithError(http.StatusBadRequest, err).SetType(ErrorTypeBind) // nolint: errcheck
+ return err
+ }
+ return nil
+}
+
+// MustBindWith binds the passed struct pointer using the specified binding engine.
+// It will abort the request with HTTP 400 if any error occurs.
+// See the binding package.
+func (c *Context) MustBindWith(obj interface{}, b binding.Binding) error {
+ if err := c.ShouldBindWith(obj, b); err != nil {
+ c.AbortWithError(http.StatusBadRequest, err).SetType(ErrorTypeBind) // nolint: errcheck
+ return err
+ }
+ return nil
+}
+
+// ShouldBind checks the Content-Type to select a binding engine automatically.
+// Depending on the "Content-Type" header, different bindings are used:
+//     "application/json" --> JSON binding
+//     "application/xml"  --> XML binding
+//     otherwise          --> returns an error
+// It parses the request's body as JSON if Content-Type == "application/json", or as XML if Content-Type == "application/xml".
+// It decodes the payload into the struct specified as a pointer.
+// Like c.Bind(), but this method does not set the response status code to 400 or abort if the payload is not valid.
+func (c *Context) ShouldBind(obj interface{}) error {
+ b := binding.Default(c.Request.Method, c.ContentType())
+ return c.ShouldBindWith(obj, b)
+}
+
+// ShouldBindJSON is a shortcut for c.ShouldBindWith(obj, binding.JSON).
+func (c *Context) ShouldBindJSON(obj interface{}) error {
+ return c.ShouldBindWith(obj, binding.JSON)
+}
+
+// ShouldBindXML is a shortcut for c.ShouldBindWith(obj, binding.XML).
+func (c *Context) ShouldBindXML(obj interface{}) error {
+ return c.ShouldBindWith(obj, binding.XML)
+}
+
+// ShouldBindQuery is a shortcut for c.ShouldBindWith(obj, binding.Query).
+func (c *Context) ShouldBindQuery(obj interface{}) error {
+ return c.ShouldBindWith(obj, binding.Query)
+}
+
+// ShouldBindYAML is a shortcut for c.ShouldBindWith(obj, binding.YAML).
+func (c *Context) ShouldBindYAML(obj interface{}) error {
+ return c.ShouldBindWith(obj, binding.YAML)
+}
+
+// ShouldBindHeader is a shortcut for c.ShouldBindWith(obj, binding.Header).
+func (c *Context) ShouldBindHeader(obj interface{}) error {
+ return c.ShouldBindWith(obj, binding.Header)
+}
+
+// ShouldBindUri binds the passed struct pointer using binding.Uri and the URI parameters.
+func (c *Context) ShouldBindUri(obj interface{}) error {
+ m := make(map[string][]string)
+ for _, v := range c.Params {
+ m[v.Key] = []string{v.Value}
+ }
+ return binding.Uri.BindUri(m, obj)
+}
+
+// ShouldBindWith binds the passed struct pointer using the specified binding engine.
+// See the binding package.
+func (c *Context) ShouldBindWith(obj interface{}, b binding.Binding) error {
+ return b.Bind(c.Request, obj)
+}
+
+// ShouldBindBodyWith is similar to ShouldBindWith, but it stores the request
+// body into the context and reuses it when called again.
+//
+// NOTE: This method reads the body before binding. So you should use
+// ShouldBindWith for better performance if you only need to call it once.
+func (c *Context) ShouldBindBodyWith(obj interface{}, bb binding.BindingBody) (err error) {
+ var body []byte
+ if cb, ok := c.Get(BodyBytesKey); ok {
+ if cbb, ok := cb.([]byte); ok {
+ body = cbb
+ }
+ }
+ if body == nil {
+ body, err = ioutil.ReadAll(c.Request.Body)
+ if err != nil {
+ return err
+ }
+ c.Set(BodyBytesKey, body)
+ }
+ return bb.BindBody(body, obj)
+}
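+
+// Editor's sketch (FormA and FormB are hypothetical types): because the body
+// is cached under BodyBytesKey, the same payload can be bound more than once:
+//
+//    var a FormA
+//    var b FormB
+//    if err := c.ShouldBindBodyWith(&a, binding.JSON); err == nil {
+//        c.String(http.StatusOK, "the body matched FormA")
+//    } else if err := c.ShouldBindBodyWith(&b, binding.JSON); err == nil {
+//        c.String(http.StatusOK, "the body matched FormB")
+//    }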
+
+// ClientIP implements a best-effort algorithm to return the real client IP.
+// It calls c.RemoteIP() under the hood to check whether the remote IP is a trusted proxy.
+// If it is, ClientIP will then try to parse the headers defined in Engine.RemoteIPHeaders (defaulting to [X-Forwarded-For, X-Real-Ip]).
+// If the headers are not syntactically valid OR the remote IP does not correspond to a trusted proxy,
+// the remote IP (coming from Request.RemoteAddr) is returned.
+func (c *Context) ClientIP() string {
+ if c.engine.AppEngine {
+ if addr := c.requestHeader("X-Appengine-Remote-Addr"); addr != "" {
+ return addr
+ }
+ }
+
+ remoteIP, trusted := c.RemoteIP()
+ if remoteIP == nil {
+ return ""
+ }
+
+ if trusted && c.engine.ForwardedByClientIP && c.engine.RemoteIPHeaders != nil {
+ for _, headerName := range c.engine.RemoteIPHeaders {
+ ip, valid := validateHeader(c.requestHeader(headerName))
+ if valid {
+ return ip
+ }
+ }
+ }
+ return remoteIP.String()
+}
+
+// RemoteIP parses the IP from Request.RemoteAddr, normalizes and returns the IP (without the port).
+// It also checks if the remoteIP is a trusted proxy or not.
+// In order to perform this validation, it will see if the IP is contained within at least one of the CIDR blocks
+// defined in Engine.TrustedProxies.
+func (c *Context) RemoteIP() (net.IP, bool) {
+ ip, _, err := net.SplitHostPort(strings.TrimSpace(c.Request.RemoteAddr))
+ if err != nil {
+ return nil, false
+ }
+ remoteIP := net.ParseIP(ip)
+ if remoteIP == nil {
+ return nil, false
+ }
+
+ if c.engine.trustedCIDRs != nil {
+ for _, cidr := range c.engine.trustedCIDRs {
+ if cidr.Contains(remoteIP) {
+ return remoteIP, true
+ }
+ }
+ }
+
+ return remoteIP, false
+}
+
+func validateHeader(header string) (clientIP string, valid bool) {
+ if header == "" {
+ return "", false
+ }
+ items := strings.Split(header, ",")
+ for i, ipStr := range items {
+ ipStr = strings.TrimSpace(ipStr)
+ ip := net.ParseIP(ipStr)
+ if ip == nil {
+ return "", false
+ }
+
+ // We need to return the first IP in the list, but
+ // we should not return early, since we need to validate that
+ // the rest of the header is syntactically valid.
+ if i == 0 {
+ clientIP = ipStr
+ valid = true
+ }
+ }
+ return
+}
+
+// ContentType returns the Content-Type header of the request.
+func (c *Context) ContentType() string {
+ return filterFlags(c.requestHeader("Content-Type"))
+}
+
+// IsWebsocket returns true if the request headers indicate that a websocket
+// handshake is being initiated by the client.
+func (c *Context) IsWebsocket() bool {
+ if strings.Contains(strings.ToLower(c.requestHeader("Connection")), "upgrade") &&
+ strings.EqualFold(c.requestHeader("Upgrade"), "websocket") {
+ return true
+ }
+ return false
+}
+
+func (c *Context) requestHeader(key string) string {
+ return c.Request.Header.Get(key)
+}
+
+/************************************/
+/******** RESPONSE RENDERING ********/
+/************************************/
+
+// bodyAllowedForStatus is a copy of http.bodyAllowedForStatus non-exported function.
+func bodyAllowedForStatus(status int) bool {
+ switch {
+ case status >= 100 && status <= 199:
+ return false
+ case status == http.StatusNoContent:
+ return false
+ case status == http.StatusNotModified:
+ return false
+ }
+ return true
+}
+
+// Status sets the HTTP response code.
+func (c *Context) Status(code int) {
+ c.Writer.WriteHeader(code)
+}
+
+// Header is an intelligent shortcut for c.Writer.Header().Set(key, value).
+// It writes a header in the response.
+// If value == "", this method removes the header via `c.Writer.Header().Del(key)`.
+func (c *Context) Header(key, value string) {
+ if value == "" {
+ c.Writer.Header().Del(key)
+ return
+ }
+ c.Writer.Header().Set(key, value)
+}
+
+// GetHeader returns the value from the request headers.
+func (c *Context) GetHeader(key string) string {
+ return c.requestHeader(key)
+}
+
+// GetRawData returns the raw request body data.
+func (c *Context) GetRawData() ([]byte, error) {
+ return ioutil.ReadAll(c.Request.Body)
+}
+
+// SetSameSite sets the SameSite cookie attribute.
+func (c *Context) SetSameSite(samesite http.SameSite) {
+ c.sameSite = samesite
+}
+
+// SetCookie adds a Set-Cookie header to the ResponseWriter's headers.
+// The provided cookie must have a valid Name. Invalid cookies may be
+// silently dropped.
+func (c *Context) SetCookie(name, value string, maxAge int, path, domain string, secure, httpOnly bool) {
+ if path == "" {
+ path = "/"
+ }
+ http.SetCookie(c.Writer, &http.Cookie{
+ Name: name,
+ Value: url.QueryEscape(value),
+ MaxAge: maxAge,
+ Path: path,
+ Domain: domain,
+ SameSite: c.sameSite,
+ Secure: secure,
+ HttpOnly: httpOnly,
+ })
+}
+
+// Cookie returns the named cookie provided in the request or
+// ErrNoCookie if not found. The returned cookie value is unescaped.
+// If multiple cookies match the given name, only one cookie will
+// be returned.
+func (c *Context) Cookie(name string) (string, error) {
+ cookie, err := c.Request.Cookie(name)
+ if err != nil {
+ return "", err
+ }
+ val, _ := url.QueryUnescape(cookie.Value)
+ return val, nil
+}
+
+// Render writes the response headers and calls render.Render to render data.
+func (c *Context) Render(code int, r render.Render) {
+ c.Status(code)
+
+ if !bodyAllowedForStatus(code) {
+ r.WriteContentType(c.Writer)
+ c.Writer.WriteHeaderNow()
+ return
+ }
+
+ if err := r.Render(c.Writer); err != nil {
+ panic(err)
+ }
+}
+
+// HTML renders the HTTP template specified by its file name.
+// It also updates the HTTP code and sets the Content-Type as "text/html".
+// See http://golang.org/doc/articles/wiki/
+func (c *Context) HTML(code int, name string, obj interface{}) {
+ instance := c.engine.HTMLRender.Instance(name, obj)
+ c.Render(code, instance)
+}
+
+// IndentedJSON serializes the given struct as pretty JSON (indented + endlines) into the response body.
+// It also sets the Content-Type as "application/json".
+// WARNING: we recommend using this only for development purposes since printing pretty JSON is
+// more CPU- and bandwidth-consuming. Use Context.JSON() instead.
+func (c *Context) IndentedJSON(code int, obj interface{}) {
+ c.Render(code, render.IndentedJSON{Data: obj})
+}
+
+// SecureJSON serializes the given struct as Secure JSON into the response body.
+// Default prepends "while(1)," to response body if the given struct is array values.
+// It also sets the Content-Type as "application/json".
+func (c *Context) SecureJSON(code int, obj interface{}) {
+ c.Render(code, render.SecureJSON{Prefix: c.engine.secureJSONPrefix, Data: obj})
+}
+
+// JSONP serializes the given struct as JSON into the response body.
+// It adds padding to the response body to allow requesting data from a server residing in a different domain than the client.
+// It also sets the Content-Type as "application/javascript".
+func (c *Context) JSONP(code int, obj interface{}) {
+ callback := c.DefaultQuery("callback", "")
+ if callback == "" {
+ c.Render(code, render.JSON{Data: obj})
+ return
+ }
+ c.Render(code, render.JsonpJSON{Callback: callback, Data: obj})
+}
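+
+// Editor's sketch (route and payload are assumptions): with ?callback=x the
+// response body becomes x({"foo":"bar"}); without a callback, plain JSON is sent:
+//
+//    router.GET("/jsonp", func(c *gin.Context) {
+//        c.JSONP(http.StatusOK, gin.H{"foo": "bar"})
+//    })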
+
+// JSON serializes the given struct as JSON into the response body.
+// It also sets the Content-Type as "application/json".
+func (c *Context) JSON(code int, obj interface{}) {
+ c.Render(code, render.JSON{Data: obj})
+}
+
+// AsciiJSON serializes the given struct as JSON into the response body with Unicode converted to ASCII strings.
+// It also sets the Content-Type as "application/json".
+func (c *Context) AsciiJSON(code int, obj interface{}) {
+ c.Render(code, render.AsciiJSON{Data: obj})
+}
+
+// PureJSON serializes the given struct as JSON into the response body.
+// PureJSON, unlike JSON, does not replace special html characters with their unicode entities.
+func (c *Context) PureJSON(code int, obj interface{}) {
+ c.Render(code, render.PureJSON{Data: obj})
+}
+
+// XML serializes the given struct as XML into the response body.
+// It also sets the Content-Type as "application/xml".
+func (c *Context) XML(code int, obj interface{}) {
+ c.Render(code, render.XML{Data: obj})
+}
+
+// YAML serializes the given struct as YAML into the response body.
+func (c *Context) YAML(code int, obj interface{}) {
+ c.Render(code, render.YAML{Data: obj})
+}
+
+// ProtoBuf serializes the given struct as ProtoBuf into the response body.
+func (c *Context) ProtoBuf(code int, obj interface{}) {
+ c.Render(code, render.ProtoBuf{Data: obj})
+}
+
+// String writes the given string into the response body.
+func (c *Context) String(code int, format string, values ...interface{}) {
+ c.Render(code, render.String{Format: format, Data: values})
+}
+
+// Redirect returns an HTTP redirect to the specified location.
+func (c *Context) Redirect(code int, location string) {
+ c.Render(-1, render.Redirect{
+ Code: code,
+ Location: location,
+ Request: c.Request,
+ })
+}
+
+// Data writes some data into the body stream and updates the HTTP code.
+func (c *Context) Data(code int, contentType string, data []byte) {
+ c.Render(code, render.Data{
+ ContentType: contentType,
+ Data: data,
+ })
+}
+
+// DataFromReader writes the specified reader into the body stream and updates the HTTP code.
+func (c *Context) DataFromReader(code int, contentLength int64, contentType string, reader io.Reader, extraHeaders map[string]string) {
+ c.Render(code, render.Reader{
+ Headers: extraHeaders,
+ ContentType: contentType,
+ ContentLength: contentLength,
+ Reader: reader,
+ })
+}
+
+// File writes the specified file into the body stream in an efficient way.
+func (c *Context) File(filepath string) {
+ http.ServeFile(c.Writer, c.Request, filepath)
+}
+
+// FileFromFS writes the specified file from http.FileSystem into the body stream in an efficient way.
+func (c *Context) FileFromFS(filepath string, fs http.FileSystem) {
+ defer func(old string) {
+ c.Request.URL.Path = old
+ }(c.Request.URL.Path)
+
+ c.Request.URL.Path = filepath
+
+ http.FileServer(fs).ServeHTTP(c.Writer, c.Request)
+}
+
+// FileAttachment writes the specified file into the body stream in an efficient way.
+// On the client side, the file will typically be downloaded with the given filename.
+func (c *Context) FileAttachment(filepath, filename string) {
+ c.Writer.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename))
+ http.ServeFile(c.Writer, c.Request, filepath)
+}
+
+// SSEvent writes a Server-Sent Event into the body stream.
+func (c *Context) SSEvent(name string, message interface{}) {
+ c.Render(-1, sse.Event{
+ Event: name,
+ Data: message,
+ })
+}
+
+// Stream sends a streaming response and returns a boolean
+// indicating whether the client disconnected in the middle of the stream.
+func (c *Context) Stream(step func(w io.Writer) bool) bool {
+ w := c.Writer
+ clientGone := w.CloseNotify()
+ for {
+ select {
+ case <-clientGone:
+ return true
+ default:
+ keepOpen := step(w)
+ w.Flush()
+ if !keepOpen {
+ return false
+ }
+ }
+ }
+}
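+
+// Editor's sketch (route and payload are assumptions): Stream keeps calling
+// step until it returns false or the client disconnects:
+//
+//    router.GET("/stream", func(c *gin.Context) {
+//        ticks := 0
+//        c.Stream(func(w io.Writer) bool {
+//            fmt.Fprintf(w, "tick %d\n", ticks)
+//            ticks++
+//            return ticks < 10 // keep the stream open for 10 messages
+//        })
+//    })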
+
+/************************************/
+/******** CONTENT NEGOTIATION *******/
+/************************************/
+
+// Negotiate contains all negotiations data.
+type Negotiate struct {
+ Offered []string
+ HTMLName string
+ HTMLData interface{}
+ JSONData interface{}
+ XMLData interface{}
+ YAMLData interface{}
+ Data interface{}
+}
+
+// Negotiate calls different Render functions according to the acceptable Accept format.
+func (c *Context) Negotiate(code int, config Negotiate) {
+ switch c.NegotiateFormat(config.Offered...) {
+ case binding.MIMEJSON:
+ data := chooseData(config.JSONData, config.Data)
+ c.JSON(code, data)
+
+ case binding.MIMEHTML:
+ data := chooseData(config.HTMLData, config.Data)
+ c.HTML(code, config.HTMLName, data)
+
+ case binding.MIMEXML:
+ data := chooseData(config.XMLData, config.Data)
+ c.XML(code, data)
+
+ case binding.MIMEYAML:
+ data := chooseData(config.YAMLData, config.Data)
+ c.YAML(code, data)
+
+ default:
+ c.AbortWithError(http.StatusNotAcceptable, errors.New("the accepted formats are not offered by the server")) // nolint: errcheck
+ }
+}
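+
+// Editor's sketch (offered formats and data are assumptions): a handler that
+// serves JSON or XML depending on the request's Accept header:
+//
+//    router.GET("/negotiate", func(c *gin.Context) {
+//        c.Negotiate(http.StatusOK, gin.Negotiate{
+//            Offered: []string{gin.MIMEJSON, gin.MIMEXML},
+//            Data:    gin.H{"status": "ok"},
+//        })
+//    })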
+
+// NegotiateFormat returns an acceptable Accept format.
+func (c *Context) NegotiateFormat(offered ...string) string {
+ assert1(len(offered) > 0, "you must provide at least one offer")
+
+ if c.Accepted == nil {
+ c.Accepted = parseAccept(c.requestHeader("Accept"))
+ }
+ if len(c.Accepted) == 0 {
+ return offered[0]
+ }
+ for _, accepted := range c.Accepted {
+ for _, offer := range offered {
+ // According to RFC 2616 and RFC 2396, non-ASCII characters are not allowed in headers,
+ // therefore we can just iterate over the string without casting it into []rune
+ i := 0
+ for ; i < len(accepted); i++ {
+ if accepted[i] == '*' || offer[i] == '*' {
+ return offer
+ }
+ if accepted[i] != offer[i] {
+ break
+ }
+ }
+ if i == len(accepted) {
+ return offer
+ }
+ }
+ }
+ return ""
+}
+
+// SetAccepted sets Accept header data.
+func (c *Context) SetAccepted(formats ...string) {
+ c.Accepted = formats
+}
+
+/************************************/
+/***** GOLANG.ORG/X/NET/CONTEXT *****/
+/************************************/
+
+// Deadline always returns that there is no deadline (ok==false);
+// maybe you want to use Request.Context().Deadline() instead.
+func (c *Context) Deadline() (deadline time.Time, ok bool) {
+ return
+}
+
+// Done always returns nil (a channel which will wait forever);
+// if you want to abort your work when the connection was closed
+// you should use Request.Context().Done() instead.
+func (c *Context) Done() <-chan struct{} {
+ return nil
+}
+
+// Err always returns nil; maybe you want to use Request.Context().Err() instead.
+func (c *Context) Err() error {
+ return nil
+}
+
+// Value returns the value associated with this context for key, or nil
+// if no value is associated with key. Successive calls to Value with
+// the same key return the same result.
+func (c *Context) Value(key interface{}) interface{} {
+ if key == 0 {
+ return c.Request
+ }
+ if keyAsString, ok := key.(string); ok {
+ val, _ := c.Get(keyAsString)
+ return val
+ }
+ return nil
+}
diff --git a/vendor/github.com/gin-gonic/gin/context_appengine.go b/vendor/github.com/gin-gonic/gin/context_appengine.go
new file mode 100644
index 0000000..d565843
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/context_appengine.go
@@ -0,0 +1,12 @@
+// Copyright 2017 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+//go:build appengine
+// +build appengine
+
+package gin
+
+func init() {
+ defaultAppEngine = true
+}
diff --git a/vendor/github.com/gin-gonic/gin/debug.go b/vendor/github.com/gin-gonic/gin/debug.go
new file mode 100644
index 0000000..4c7cd0c
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/debug.go
@@ -0,0 +1,103 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "fmt"
+ "html/template"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+const ginSupportMinGoVer = 12
+
+// IsDebugging returns true if the framework is running in debug mode.
+// Use SetMode(gin.ReleaseMode) to disable debug mode.
+func IsDebugging() bool {
+ return ginMode == debugCode
+}
+
+// DebugPrintRouteFunc indicates debug log output format.
+var DebugPrintRouteFunc func(httpMethod, absolutePath, handlerName string, nuHandlers int)
+
+func debugPrintRoute(httpMethod, absolutePath string, handlers HandlersChain) {
+ if IsDebugging() {
+ nuHandlers := len(handlers)
+ handlerName := nameOfFunction(handlers.Last())
+ if DebugPrintRouteFunc == nil {
+ debugPrint("%-6s %-25s --> %s (%d handlers)\n", httpMethod, absolutePath, handlerName, nuHandlers)
+ } else {
+ DebugPrintRouteFunc(httpMethod, absolutePath, handlerName, nuHandlers)
+ }
+ }
+}
+
+func debugPrintLoadTemplate(tmpl *template.Template) {
+ if IsDebugging() {
+ var buf strings.Builder
+ for _, tmpl := range tmpl.Templates() {
+ buf.WriteString("\t- ")
+ buf.WriteString(tmpl.Name())
+ buf.WriteString("\n")
+ }
+ debugPrint("Loaded HTML Templates (%d): \n%s\n", len(tmpl.Templates()), buf.String())
+ }
+}
+
+func debugPrint(format string, values ...interface{}) {
+ if IsDebugging() {
+ if !strings.HasSuffix(format, "\n") {
+ format += "\n"
+ }
+ fmt.Fprintf(DefaultWriter, "[GIN-debug] "+format, values...)
+ }
+}
+
+func getMinVer(v string) (uint64, error) {
+ first := strings.IndexByte(v, '.')
+ last := strings.LastIndexByte(v, '.')
+ if first == last {
+ return strconv.ParseUint(v[first+1:], 10, 64)
+ }
+ return strconv.ParseUint(v[first+1:last], 10, 64)
+}
+
+func debugPrintWARNINGDefault() {
+ if v, e := getMinVer(runtime.Version()); e == nil && v <= ginSupportMinGoVer {
+ debugPrint(`[WARNING] Now Gin requires Go 1.12+.
+
+`)
+ }
+ debugPrint(`[WARNING] Creating an Engine instance with the Logger and Recovery middleware already attached.
+
+`)
+}
+
+func debugPrintWARNINGNew() {
+ debugPrint(`[WARNING] Running in "debug" mode. Switch to "release" mode in production.
+ - using env: export GIN_MODE=release
+ - using code: gin.SetMode(gin.ReleaseMode)
+
+`)
+}
+
+func debugPrintWARNINGSetHTMLTemplate() {
+ debugPrint(`[WARNING] SetHTMLTemplate() is NOT thread-safe. It should only be called
+at initialization, i.e. before any route is registered or the router is listening on a socket:
+
+ router := gin.Default()
+ router.SetHTMLTemplate(template) // << good place
+
+`)
+}
+
+func debugPrintError(err error) {
+ if err != nil {
+ if IsDebugging() {
+ fmt.Fprintf(DefaultErrorWriter, "[GIN-debug] [ERROR] %v\n", err)
+ }
+ }
+}
diff --git a/vendor/github.com/gin-gonic/gin/deprecated.go b/vendor/github.com/gin-gonic/gin/deprecated.go
new file mode 100644
index 0000000..ab44742
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/deprecated.go
@@ -0,0 +1,21 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "log"
+
+ "github.com/gin-gonic/gin/binding"
+)
+
+// BindWith binds the passed struct pointer using the specified binding engine.
+// See the binding package.
+func (c *Context) BindWith(obj interface{}, b binding.Binding) error {
+ log.Println(`BindWith(\"interface{}, binding.Binding\") error is going to
+ be deprecated, please check issue #662 and either use MustBindWith() if you
+ want HTTP 400 to be automatically returned if any error occur, or use
+ ShouldBindWith() if you need to manage the error.`)
+ return c.MustBindWith(obj, b)
+}
diff --git a/vendor/github.com/gin-gonic/gin/doc.go b/vendor/github.com/gin-gonic/gin/doc.go
new file mode 100644
index 0000000..1bd0386
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/doc.go
@@ -0,0 +1,6 @@
+/*
+Package gin implements an HTTP web framework called gin.
+
+See https://gin-gonic.com/ for more information about gin.
+*/
+package gin // import "github.com/gin-gonic/gin"
diff --git a/vendor/github.com/gin-gonic/gin/errors.go b/vendor/github.com/gin-gonic/gin/errors.go
new file mode 100644
index 0000000..0f276c1
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/errors.go
@@ -0,0 +1,174 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/gin-gonic/gin/internal/json"
+)
+
+// ErrorType is an unsigned 64-bit error code as defined in the gin spec.
+type ErrorType uint64
+
+const (
+ // ErrorTypeBind is used when Context.Bind() fails.
+ ErrorTypeBind ErrorType = 1 << 63
+ // ErrorTypeRender is used when Context.Render() fails.
+ ErrorTypeRender ErrorType = 1 << 62
+ // ErrorTypePrivate indicates a private error.
+ ErrorTypePrivate ErrorType = 1 << 0
+ // ErrorTypePublic indicates a public error.
+ ErrorTypePublic ErrorType = 1 << 1
+ // ErrorTypeAny indicates any other error.
+ ErrorTypeAny ErrorType = 1<<64 - 1
+ // ErrorTypeNu indicates any other error.
+ ErrorTypeNu = 2
+)
+
+// Error represents an error's specification.
+type Error struct {
+ Err error
+ Type ErrorType
+ Meta interface{}
+}
+
+type errorMsgs []*Error
+
+var _ error = &Error{}
+
+// SetType sets the error's type.
+func (msg *Error) SetType(flags ErrorType) *Error {
+ msg.Type = flags
+ return msg
+}
+
+// SetMeta sets the error's metadata.
+func (msg *Error) SetMeta(data interface{}) *Error {
+ msg.Meta = data
+ return msg
+}
+
+// JSON creates a properly formatted JSON representation of the error.
+func (msg *Error) JSON() interface{} {
+ jsonData := H{}
+ if msg.Meta != nil {
+ value := reflect.ValueOf(msg.Meta)
+ switch value.Kind() {
+ case reflect.Struct:
+ return msg.Meta
+ case reflect.Map:
+ for _, key := range value.MapKeys() {
+ jsonData[key.String()] = value.MapIndex(key).Interface()
+ }
+ default:
+ jsonData["meta"] = msg.Meta
+ }
+ }
+ if _, ok := jsonData["error"]; !ok {
+ jsonData["error"] = msg.Error()
+ }
+ return jsonData
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (msg *Error) MarshalJSON() ([]byte, error) {
+ return json.Marshal(msg.JSON())
+}
+
+// Error implements the error interface.
+func (msg Error) Error() string {
+ return msg.Err.Error()
+}
+
+// IsType reports whether the error's type matches the given flags.
+func (msg *Error) IsType(flags ErrorType) bool {
+ return (msg.Type & flags) > 0
+}
+
+// Unwrap returns the wrapped error, to allow interoperability with errors.Is(), errors.As() and errors.Unwrap()
+func (msg *Error) Unwrap() error {
+ return msg.Err
+}
+
+// ByType returns a readonly copy filtered by the given error type.
+// i.e. ByType(gin.ErrorTypePublic) returns a slice of errors with type=ErrorTypePublic.
+func (a errorMsgs) ByType(typ ErrorType) errorMsgs {
+ if len(a) == 0 {
+ return nil
+ }
+ if typ == ErrorTypeAny {
+ return a
+ }
+ var result errorMsgs
+ for _, msg := range a {
+ if msg.IsType(typ) {
+ result = append(result, msg)
+ }
+ }
+ return result
+}
+
+// Last returns the last error in the slice. It returns nil if the array is empty.
+// Shortcut for errors[len(errors)-1].
+func (a errorMsgs) Last() *Error {
+ if length := len(a); length > 0 {
+ return a[length-1]
+ }
+ return nil
+}
+
+// Errors returns an array with all the error messages.
+// Example:
+// c.Error(errors.New("first"))
+// c.Error(errors.New("second"))
+// c.Error(errors.New("third"))
+// c.Errors.Errors() // == []string{"first", "second", "third"}
+func (a errorMsgs) Errors() []string {
+ if len(a) == 0 {
+ return nil
+ }
+ errorStrings := make([]string, len(a))
+ for i, err := range a {
+ errorStrings[i] = err.Error()
+ }
+ return errorStrings
+}
+
+func (a errorMsgs) JSON() interface{} {
+ switch length := len(a); length {
+ case 0:
+ return nil
+ case 1:
+ return a.Last().JSON()
+ default:
+ jsonData := make([]interface{}, length)
+ for i, err := range a {
+ jsonData[i] = err.JSON()
+ }
+ return jsonData
+ }
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (a errorMsgs) MarshalJSON() ([]byte, error) {
+ return json.Marshal(a.JSON())
+}
+
+func (a errorMsgs) String() string {
+ if len(a) == 0 {
+ return ""
+ }
+ var buffer strings.Builder
+ for i, msg := range a {
+ fmt.Fprintf(&buffer, "Error #%02d: %s\n", i+1, msg.Err)
+ if msg.Meta != nil {
+ fmt.Fprintf(&buffer, " Meta: %v\n", msg.Meta)
+ }
+ }
+ return buffer.String()
+}
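+
+// Editor's sketch (not upstream code): a middleware that collects the errors
+// attached via c.Error() once the handlers have run:
+//
+//    func ErrorLogger() gin.HandlerFunc {
+//        return func(c *gin.Context) {
+//            c.Next()
+//            for _, e := range c.Errors.ByType(gin.ErrorTypePrivate) {
+//                log.Println(e.Err)
+//            }
+//        }
+//    }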
diff --git a/vendor/github.com/gin-gonic/gin/fs.go b/vendor/github.com/gin-gonic/gin/fs.go
new file mode 100644
index 0000000..007d9b7
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/fs.go
@@ -0,0 +1,45 @@
+// Copyright 2017 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "net/http"
+ "os"
+)
+
+type onlyFilesFS struct {
+ fs http.FileSystem
+}
+
+type neuteredReaddirFile struct {
+ http.File
+}
+
+// Dir returns an http.FileSystem that can be used by http.FileServer(). It is used internally
+// in router.Static().
+// If listDirectory == true, it works the same as http.Dir(); otherwise it returns
+// a filesystem that prevents http.FileServer() from listing the directory files.
+func Dir(root string, listDirectory bool) http.FileSystem {
+ fs := http.Dir(root)
+ if listDirectory {
+ return fs
+ }
+ return &onlyFilesFS{fs}
+}
+
+// Open conforms to http.FileSystem.
+func (fs onlyFilesFS) Open(name string) (http.File, error) {
+ f, err := fs.fs.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ return neuteredReaddirFile{f}, nil
+}
+
+// Readdir overrides the http.File default implementation.
+func (f neuteredReaddirFile) Readdir(count int) ([]os.FileInfo, error) {
+ // this disables directory listing
+ return nil, nil
+}
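+
+// Editor's sketch (paths are assumptions): gin.Dir with listDirectory == false
+// serves files while suppressing directory listings:
+//
+//    router.StaticFS("/files", gin.Dir("./public", false))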
diff --git a/vendor/github.com/gin-gonic/gin/gin.go b/vendor/github.com/gin-gonic/gin/gin.go
new file mode 100644
index 0000000..03a0e12
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/gin.go
@@ -0,0 +1,577 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "fmt"
+ "html/template"
+ "net"
+ "net/http"
+ "os"
+ "path"
+ "strings"
+ "sync"
+
+ "github.com/gin-gonic/gin/internal/bytesconv"
+ "github.com/gin-gonic/gin/render"
+)
+
+const defaultMultipartMemory = 32 << 20 // 32 MB
+
+var (
+ default404Body = []byte("404 page not found")
+ default405Body = []byte("405 method not allowed")
+)
+
+var defaultAppEngine bool
+
+// HandlerFunc defines the handler used by gin middleware as return value.
+type HandlerFunc func(*Context)
+
+// HandlersChain defines a HandlerFunc array.
+type HandlersChain []HandlerFunc
+
+// Last returns the last handler in the chain. ie. the last handler is the main one.
+func (c HandlersChain) Last() HandlerFunc {
+ if length := len(c); length > 0 {
+ return c[length-1]
+ }
+ return nil
+}
+
+// RouteInfo represents a request route's specification which contains method and path and its handler.
+type RouteInfo struct {
+ Method string
+ Path string
+ Handler string
+ HandlerFunc HandlerFunc
+}
+
+// RoutesInfo defines a RouteInfo array.
+type RoutesInfo []RouteInfo
+
+// Engine is the framework's instance; it contains the muxer, middleware and configuration settings.
+// Create an instance of Engine by using New() or Default().
+type Engine struct {
+ RouterGroup
+
+ // Enables automatic redirection if the current route can't be matched but a
+ // handler for the path with (without) the trailing slash exists.
+ // For example if /foo/ is requested but a route only exists for /foo, the
+ // client is redirected to /foo with http status code 301 for GET requests
+ // and 307 for all other request methods.
+ RedirectTrailingSlash bool
+
+ // If enabled, the router tries to fix the current request path, if no
+ // handle is registered for it.
+ // First superfluous path elements like ../ or // are removed.
+ // Afterwards the router does a case-insensitive lookup of the cleaned path.
+ // If a handle can be found for this route, the router makes a redirection
+ // to the corrected path with status code 301 for GET requests and 307 for
+ // all other request methods.
+ // For example /FOO and /..//Foo could be redirected to /foo.
+ // RedirectTrailingSlash is independent of this option.
+ RedirectFixedPath bool
+
+ // If enabled, the router checks if another method is allowed for the
+ // current route, if the current request can not be routed.
+ // If this is the case, the request is answered with 'Method Not Allowed'
+ // and HTTP status code 405.
+ // If no other Method is allowed, the request is delegated to the NotFound
+ // handler.
+ HandleMethodNotAllowed bool
+
+ // If enabled, client IP will be parsed from the request's headers that
+ // match those stored at `(*gin.Engine).RemoteIPHeaders`. If no IP was
+ // fetched, it falls back to the IP obtained from
+ // `(*gin.Context).Request.RemoteAddr`.
+ ForwardedByClientIP bool
+
+ // List of headers used to obtain the client IP when
+ // `(*gin.Engine).ForwardedByClientIP` is `true` and
+ // `(*gin.Context).Request.RemoteAddr` is matched by at least one of the
+ // network origins of `(*gin.Engine).TrustedProxies`.
+ RemoteIPHeaders []string
+
+ // List of network origins (IPv4 addresses, IPv4 CIDRs, IPv6 addresses or
+ // IPv6 CIDRs) from which to trust request's headers that contain
+ // alternative client IP when `(*gin.Engine).ForwardedByClientIP` is
+ // `true`.
+ TrustedProxies []string
+
+ // #726 #755 If enabled, it will trust some headers starting with
+ // 'X-AppEngine...' for better integration with that PaaS.
+ AppEngine bool
+
+ // If enabled, the url.RawPath will be used to find parameters.
+ UseRawPath bool
+
+ // If true, the path value will be unescaped.
+ // If UseRawPath is false (the default), UnescapePathValues is effectively true,
+ // as url.Path is used, which is already unescaped.
+ UnescapePathValues bool
+
+ // Value of 'maxMemory' param that is given to http.Request's ParseMultipartForm
+ // method call.
+ MaxMultipartMemory int64
+
+ // RemoveExtraSlash: a parameter can be parsed from the URL even with extra slashes.
+ // See the PR #1817 and issue #1644
+ RemoveExtraSlash bool
+
+ delims render.Delims
+ secureJSONPrefix string
+ HTMLRender render.HTMLRender
+ FuncMap template.FuncMap
+ allNoRoute HandlersChain
+ allNoMethod HandlersChain
+ noRoute HandlersChain
+ noMethod HandlersChain
+ pool sync.Pool
+ trees methodTrees
+ maxParams uint16
+ trustedCIDRs []*net.IPNet
+}
+
+var _ IRouter = &Engine{}
+
+// New returns a new blank Engine instance without any middleware attached.
+// By default the configuration is:
+// - RedirectTrailingSlash: true
+// - RedirectFixedPath: false
+// - HandleMethodNotAllowed: false
+// - ForwardedByClientIP: true
+// - UseRawPath: false
+// - UnescapePathValues: true
+func New() *Engine {
+ debugPrintWARNINGNew()
+ engine := &Engine{
+ RouterGroup: RouterGroup{
+ Handlers: nil,
+ basePath: "/",
+ root: true,
+ },
+ FuncMap: template.FuncMap{},
+ RedirectTrailingSlash: true,
+ RedirectFixedPath: false,
+ HandleMethodNotAllowed: false,
+ ForwardedByClientIP: true,
+ RemoteIPHeaders: []string{"X-Forwarded-For", "X-Real-IP"},
+ TrustedProxies: []string{"0.0.0.0/0"},
+ AppEngine: defaultAppEngine,
+ UseRawPath: false,
+ RemoveExtraSlash: false,
+ UnescapePathValues: true,
+ MaxMultipartMemory: defaultMultipartMemory,
+ trees: make(methodTrees, 0, 9),
+ delims: render.Delims{Left: "{{", Right: "}}"},
+ secureJSONPrefix: "while(1);",
+ }
+ engine.RouterGroup.engine = engine
+ engine.pool.New = func() interface{} {
+ return engine.allocateContext()
+ }
+ return engine
+}
+
+// Default returns an Engine instance with the Logger and Recovery middleware already attached.
+func Default() *Engine {
+ debugPrintWARNINGDefault()
+ engine := New()
+ engine.Use(Logger(), Recovery())
+ return engine
+}
+
+func (engine *Engine) allocateContext() *Context {
+ v := make(Params, 0, engine.maxParams)
+ return &Context{engine: engine, params: &v}
+}
+
+// Delims sets the template left and right delimiters and returns the Engine instance.
+func (engine *Engine) Delims(left, right string) *Engine {
+ engine.delims = render.Delims{Left: left, Right: right}
+ return engine
+}
+
+// SecureJsonPrefix sets the secureJSONPrefix used in Context.SecureJSON.
+func (engine *Engine) SecureJsonPrefix(prefix string) *Engine {
+ engine.secureJSONPrefix = prefix
+ return engine
+}
+
+// LoadHTMLGlob loads HTML files identified by a glob pattern
+// and associates the result with the HTML renderer.
+func (engine *Engine) LoadHTMLGlob(pattern string) {
+ left := engine.delims.Left
+ right := engine.delims.Right
+ templ := template.Must(template.New("").Delims(left, right).Funcs(engine.FuncMap).ParseGlob(pattern))
+
+ if IsDebugging() {
+ debugPrintLoadTemplate(templ)
+ engine.HTMLRender = render.HTMLDebug{Glob: pattern, FuncMap: engine.FuncMap, Delims: engine.delims}
+ return
+ }
+
+ engine.SetHTMLTemplate(templ)
+}
+
+// LoadHTMLFiles loads a slice of HTML files
+// and associates the result with the HTML renderer.
+func (engine *Engine) LoadHTMLFiles(files ...string) {
+ if IsDebugging() {
+ engine.HTMLRender = render.HTMLDebug{Files: files, FuncMap: engine.FuncMap, Delims: engine.delims}
+ return
+ }
+
+ templ := template.Must(template.New("").Delims(engine.delims.Left, engine.delims.Right).Funcs(engine.FuncMap).ParseFiles(files...))
+ engine.SetHTMLTemplate(templ)
+}
+
+// SetHTMLTemplate associates a template with the HTML renderer.
+func (engine *Engine) SetHTMLTemplate(templ *template.Template) {
+ if len(engine.trees) > 0 {
+ debugPrintWARNINGSetHTMLTemplate()
+ }
+
+ engine.HTMLRender = render.HTMLProduction{Template: templ.Funcs(engine.FuncMap)}
+}
+
+// SetFuncMap sets the FuncMap used for template.FuncMap.
+func (engine *Engine) SetFuncMap(funcMap template.FuncMap) {
+ engine.FuncMap = funcMap
+}
+
+// NoRoute adds handlers for NoRoute. It returns a 404 code by default.
+func (engine *Engine) NoRoute(handlers ...HandlerFunc) {
+ engine.noRoute = handlers
+ engine.rebuild404Handlers()
+}
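+
+// Editor's sketch (payload is an assumption): a custom handler for unmatched routes:
+//
+//    r.NoRoute(func(c *gin.Context) {
+//        c.JSON(http.StatusNotFound, gin.H{"message": "page not found"})
+//    })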
+
+// NoMethod sets the handlers called when the request method is not allowed.
+func (engine *Engine) NoMethod(handlers ...HandlerFunc) {
+ engine.noMethod = handlers
+ engine.rebuild405Handlers()
+}
+
+// Use attaches a global middleware to the router. ie. the middleware attached through Use() will be
+// included in the handlers chain for every single request. Even 404, 405, static files...
+// For example, this is the right place for a logger or error management middleware.
+func (engine *Engine) Use(middleware ...HandlerFunc) IRoutes {
+ engine.RouterGroup.Use(middleware...)
+ engine.rebuild404Handlers()
+ engine.rebuild405Handlers()
+ return engine
+}
+
+func (engine *Engine) rebuild404Handlers() {
+ engine.allNoRoute = engine.combineHandlers(engine.noRoute)
+}
+
+func (engine *Engine) rebuild405Handlers() {
+ engine.allNoMethod = engine.combineHandlers(engine.noMethod)
+}
+
+func (engine *Engine) addRoute(method, path string, handlers HandlersChain) {
+ assert1(path[0] == '/', "path must begin with '/'")
+ assert1(method != "", "HTTP method can not be empty")
+ assert1(len(handlers) > 0, "there must be at least one handler")
+
+ debugPrintRoute(method, path, handlers)
+
+ root := engine.trees.get(method)
+ if root == nil {
+ root = new(node)
+ root.fullPath = "/"
+ engine.trees = append(engine.trees, methodTree{method: method, root: root})
+ }
+ root.addRoute(path, handlers)
+
+ // Update maxParams
+ if paramsCount := countParams(path); paramsCount > engine.maxParams {
+ engine.maxParams = paramsCount
+ }
+}
+
+// Routes returns a slice of registered routes, including some useful information, such as:
+// the http method, path and the handler name.
+func (engine *Engine) Routes() (routes RoutesInfo) {
+ for _, tree := range engine.trees {
+ routes = iterate("", tree.method, routes, tree.root)
+ }
+ return routes
+}
+
+func iterate(path, method string, routes RoutesInfo, root *node) RoutesInfo {
+ path += root.path
+ if len(root.handlers) > 0 {
+ handlerFunc := root.handlers.Last()
+ routes = append(routes, RouteInfo{
+ Method: method,
+ Path: path,
+ Handler: nameOfFunction(handlerFunc),
+ HandlerFunc: handlerFunc,
+ })
+ }
+ for _, child := range root.children {
+ routes = iterate(path, method, routes, child)
+ }
+ return routes
+}
+
+// Run attaches the router to a http.Server and starts listening and serving HTTP requests.
+// It is a shortcut for http.ListenAndServe(addr, router)
+// Note: this method will block the calling goroutine indefinitely unless an error happens.
+func (engine *Engine) Run(addr ...string) (err error) {
+ defer func() { debugPrintError(err) }()
+
+ trustedCIDRs, err := engine.prepareTrustedCIDRs()
+ if err != nil {
+ return err
+ }
+ engine.trustedCIDRs = trustedCIDRs
+ address := resolveAddress(addr)
+ debugPrint("Listening and serving HTTP on %s\n", address)
+ err = http.ListenAndServe(address, engine)
+ return
+}
+
+func (engine *Engine) prepareTrustedCIDRs() ([]*net.IPNet, error) {
+ if engine.TrustedProxies == nil {
+ return nil, nil
+ }
+
+ cidr := make([]*net.IPNet, 0, len(engine.TrustedProxies))
+ for _, trustedProxy := range engine.TrustedProxies {
+ if !strings.Contains(trustedProxy, "/") {
+ ip := parseIP(trustedProxy)
+ if ip == nil {
+ return cidr, &net.ParseError{Type: "IP address", Text: trustedProxy}
+ }
+
+ switch len(ip) {
+ case net.IPv4len:
+ trustedProxy += "/32"
+ case net.IPv6len:
+ trustedProxy += "/128"
+ }
+ }
+ _, cidrNet, err := net.ParseCIDR(trustedProxy)
+ if err != nil {
+ return cidr, err
+ }
+ cidr = append(cidr, cidrNet)
+ }
+ return cidr, nil
+}
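+
+// Usage sketch: plain IPs are widened to /32 (IPv4) or /128 (IPv6) before
+// parsing, so both forms below are accepted:
+//
+//	router := gin.Default()
+//	router.TrustedProxies = []string{"10.0.0.1", "192.168.0.0/16"}
+//	router.Run(":8080")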
+
+// parseIP parses a string representation of an IP and returns a net.IP with the
+// minimum byte representation or nil if input is invalid.
+func parseIP(ip string) net.IP {
+ parsedIP := net.ParseIP(ip)
+
+ if ipv4 := parsedIP.To4(); ipv4 != nil {
+ // return ip in a 4-byte representation
+ return ipv4
+ }
+
+ // return ip in a 16-byte representation or nil
+ return parsedIP
+}
+
+// RunTLS attaches the router to a http.Server and starts listening and serving HTTPS (secure) requests.
+// It is a shortcut for http.ListenAndServeTLS(addr, certFile, keyFile, router)
+// Note: this method will block the calling goroutine indefinitely unless an error happens.
+func (engine *Engine) RunTLS(addr, certFile, keyFile string) (err error) {
+ debugPrint("Listening and serving HTTPS on %s\n", addr)
+ defer func() { debugPrintError(err) }()
+
+ err = http.ListenAndServeTLS(addr, certFile, keyFile, engine)
+ return
+}
+
+// RunUnix attaches the router to a http.Server and starts listening and serving HTTP requests
+// through the specified unix socket (i.e. a file).
+// Note: this method will block the calling goroutine indefinitely unless an error happens.
+func (engine *Engine) RunUnix(file string) (err error) {
+ debugPrint("Listening and serving HTTP on unix:/%s", file)
+ defer func() { debugPrintError(err) }()
+
+ listener, err := net.Listen("unix", file)
+ if err != nil {
+ return
+ }
+ defer listener.Close()
+ defer os.Remove(file)
+
+ err = http.Serve(listener, engine)
+ return
+}
+
+// RunFd attaches the router to a http.Server and starts listening and serving HTTP requests
+// through the specified file descriptor.
+// Note: this method will block the calling goroutine indefinitely unless an error happens.
+func (engine *Engine) RunFd(fd int) (err error) {
+ debugPrint("Listening and serving HTTP on fd@%d", fd)
+ defer func() { debugPrintError(err) }()
+
+ f := os.NewFile(uintptr(fd), fmt.Sprintf("fd@%d", fd))
+ listener, err := net.FileListener(f)
+ if err != nil {
+ return
+ }
+ defer listener.Close()
+ err = engine.RunListener(listener)
+ return
+}
+
+// RunListener attaches the router to a http.Server and starts listening and serving HTTP requests
+// through the specified net.Listener
+func (engine *Engine) RunListener(listener net.Listener) (err error) {
+ debugPrint("Listening and serving HTTP on listener what's bind with address@%s", listener.Addr())
+ defer func() { debugPrintError(err) }()
+ err = http.Serve(listener, engine)
+ return
+}
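+
+// Usage sketch: RunListener lets the caller control socket creation, for
+// example to bind before dropping privileges:
+//
+//	listener, err := net.Listen("tcp", "127.0.0.1:8080")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Fatal(router.RunListener(listener))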
+
+// ServeHTTP conforms to the http.Handler interface.
+func (engine *Engine) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ c := engine.pool.Get().(*Context)
+ c.writermem.reset(w)
+ c.Request = req
+ c.reset()
+
+ engine.handleHTTPRequest(c)
+
+ engine.pool.Put(c)
+}
+
+// HandleContext re-enters a context that has been rewritten.
+// This can be done by setting c.Request.URL.Path to your new target.
+// Disclaimer: You can loop yourself to death with this, use wisely.
+func (engine *Engine) HandleContext(c *Context) {
+ oldIndexValue := c.index
+ c.reset()
+ engine.handleHTTPRequest(c)
+
+ c.index = oldIndexValue
+}
+
+func (engine *Engine) handleHTTPRequest(c *Context) {
+ httpMethod := c.Request.Method
+ rPath := c.Request.URL.Path
+ unescape := false
+ if engine.UseRawPath && len(c.Request.URL.RawPath) > 0 {
+ rPath = c.Request.URL.RawPath
+ unescape = engine.UnescapePathValues
+ }
+
+ if engine.RemoveExtraSlash {
+ rPath = cleanPath(rPath)
+ }
+
+ // Find root of the tree for the given HTTP method
+ t := engine.trees
+ for i, tl := 0, len(t); i < tl; i++ {
+ if t[i].method != httpMethod {
+ continue
+ }
+ root := t[i].root
+ // Find route in tree
+ value := root.getValue(rPath, c.params, unescape)
+ if value.params != nil {
+ c.Params = *value.params
+ }
+ if value.handlers != nil {
+ c.handlers = value.handlers
+ c.fullPath = value.fullPath
+ c.Next()
+ c.writermem.WriteHeaderNow()
+ return
+ }
+ if httpMethod != "CONNECT" && rPath != "/" {
+ if value.tsr && engine.RedirectTrailingSlash {
+ redirectTrailingSlash(c)
+ return
+ }
+ if engine.RedirectFixedPath && redirectFixedPath(c, root, engine.RedirectFixedPath) {
+ return
+ }
+ }
+ break
+ }
+
+ if engine.HandleMethodNotAllowed {
+ for _, tree := range engine.trees {
+ if tree.method == httpMethod {
+ continue
+ }
+ if value := tree.root.getValue(rPath, nil, unescape); value.handlers != nil {
+ c.handlers = engine.allNoMethod
+ serveError(c, http.StatusMethodNotAllowed, default405Body)
+ return
+ }
+ }
+ }
+ c.handlers = engine.allNoRoute
+ serveError(c, http.StatusNotFound, default404Body)
+}
+
+var mimePlain = []string{MIMEPlain}
+
+func serveError(c *Context, code int, defaultMessage []byte) {
+ c.writermem.status = code
+ c.Next()
+ if c.writermem.Written() {
+ return
+ }
+ if c.writermem.Status() == code {
+ c.writermem.Header()["Content-Type"] = mimePlain
+ _, err := c.Writer.Write(defaultMessage)
+ if err != nil {
+ debugPrint("cannot write message to writer during serve error: %v", err)
+ }
+ return
+ }
+ c.writermem.WriteHeaderNow()
+}
+
+func redirectTrailingSlash(c *Context) {
+ req := c.Request
+ p := req.URL.Path
+ if prefix := path.Clean(c.Request.Header.Get("X-Forwarded-Prefix")); prefix != "." {
+ p = prefix + "/" + req.URL.Path
+ }
+ req.URL.Path = p + "/"
+ if length := len(p); length > 1 && p[length-1] == '/' {
+ req.URL.Path = p[:length-1]
+ }
+ redirectRequest(c)
+}
+
+func redirectFixedPath(c *Context, root *node, trailingSlash bool) bool {
+ req := c.Request
+ rPath := req.URL.Path
+
+ if fixedPath, ok := root.findCaseInsensitivePath(cleanPath(rPath), trailingSlash); ok {
+ req.URL.Path = bytesconv.BytesToString(fixedPath)
+ redirectRequest(c)
+ return true
+ }
+ return false
+}
+
+func redirectRequest(c *Context) {
+ req := c.Request
+ rPath := req.URL.Path
+ rURL := req.URL.String()
+
+ code := http.StatusMovedPermanently // Permanent redirect, request with GET method
+ if req.Method != http.MethodGet {
+ code = http.StatusTemporaryRedirect
+ }
+ debugPrint("redirecting request %d: %s --> %s", code, rPath, rURL)
+ http.Redirect(c.Writer, req, rURL, code)
+ c.writermem.WriteHeaderNow()
+}
diff --git a/vendor/github.com/gin-gonic/gin/go.mod b/vendor/github.com/gin-gonic/gin/go.mod
new file mode 100644
index 0000000..884ff85
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/go.mod
@@ -0,0 +1,14 @@
+module github.com/gin-gonic/gin
+
+go 1.13
+
+require (
+ github.com/gin-contrib/sse v0.1.0
+ github.com/go-playground/validator/v10 v10.4.1
+ github.com/golang/protobuf v1.3.3
+ github.com/json-iterator/go v1.1.9
+ github.com/mattn/go-isatty v0.0.12
+ github.com/stretchr/testify v1.4.0
+ github.com/ugorji/go/codec v1.1.7
+ gopkg.in/yaml.v2 v2.2.8
+)
diff --git a/vendor/github.com/gin-gonic/gin/go.sum b/vendor/github.com/gin-gonic/gin/go.sum
new file mode 100644
index 0000000..a64b331
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/go.sum
@@ -0,0 +1,52 @@
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
+github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
+github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
+github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
+github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
+github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no=
+github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
+github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE=
+github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
+github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
+github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
+github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
+github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
+github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=
+github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/gin-gonic/gin/internal/bytesconv/bytesconv.go b/vendor/github.com/gin-gonic/gin/internal/bytesconv/bytesconv.go
new file mode 100644
index 0000000..86e4c4d
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/internal/bytesconv/bytesconv.go
@@ -0,0 +1,24 @@
+// Copyright 2020 Gin Core Team. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package bytesconv
+
+import (
+ "unsafe"
+)
+
+// StringToBytes converts string to byte slice without a memory allocation.
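+// The returned slice shares the string's backing memory and must not be mutated.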
+func StringToBytes(s string) []byte {
+ return *(*[]byte)(unsafe.Pointer(
+ &struct {
+ string
+ Cap int
+ }{s, len(s)},
+ ))
+}
+
+// BytesToString converts byte slice to string without a memory allocation.
+func BytesToString(b []byte) string {
+ return *(*string)(unsafe.Pointer(&b))
+}
diff --git a/vendor/github.com/gin-gonic/gin/internal/json/json.go b/vendor/github.com/gin-gonic/gin/internal/json/json.go
new file mode 100644
index 0000000..172aeb2
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/internal/json/json.go
@@ -0,0 +1,23 @@
+// Copyright 2017 Bo-Yi Wu. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+//go:build !jsoniter
+// +build !jsoniter
+
+package json
+
+import "encoding/json"
+
+var (
+ // Marshal is exported by gin/json package.
+ Marshal = json.Marshal
+ // Unmarshal is exported by gin/json package.
+ Unmarshal = json.Unmarshal
+ // MarshalIndent is exported by gin/json package.
+ MarshalIndent = json.MarshalIndent
+ // NewDecoder is exported by gin/json package.
+ NewDecoder = json.NewDecoder
+ // NewEncoder is exported by gin/json package.
+ NewEncoder = json.NewEncoder
+)
diff --git a/vendor/github.com/gin-gonic/gin/internal/json/jsoniter.go b/vendor/github.com/gin-gonic/gin/internal/json/jsoniter.go
new file mode 100644
index 0000000..232f8dc
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/internal/json/jsoniter.go
@@ -0,0 +1,24 @@
+// Copyright 2017 Bo-Yi Wu. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+//go:build jsoniter
+// +build jsoniter
+
+package json
+
+import jsoniter "github.com/json-iterator/go"
+
+var (
+ json = jsoniter.ConfigCompatibleWithStandardLibrary
+ // Marshal is exported by gin/json package.
+ Marshal = json.Marshal
+ // Unmarshal is exported by gin/json package.
+ Unmarshal = json.Unmarshal
+ // MarshalIndent is exported by gin/json package.
+ MarshalIndent = json.MarshalIndent
+ // NewDecoder is exported by gin/json package.
+ NewDecoder = json.NewDecoder
+ // NewEncoder is exported by gin/json package.
+ NewEncoder = json.NewEncoder
+)
diff --git a/vendor/github.com/gin-gonic/gin/logger.go b/vendor/github.com/gin-gonic/gin/logger.go
new file mode 100644
index 0000000..d361b74
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/logger.go
@@ -0,0 +1,271 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "time"
+
+ "github.com/mattn/go-isatty"
+)
+
+type consoleColorModeValue int
+
+const (
+ autoColor consoleColorModeValue = iota
+ disableColor
+ forceColor
+)
+
+const (
+ green = "\033[97;42m"
+ white = "\033[90;47m"
+ yellow = "\033[90;43m"
+ red = "\033[97;41m"
+ blue = "\033[97;44m"
+ magenta = "\033[97;45m"
+ cyan = "\033[97;46m"
+ reset = "\033[0m"
+)
+
+var consoleColorMode = autoColor
+
+// LoggerConfig defines the config for Logger middleware.
+type LoggerConfig struct {
+ // Formatter is the log format function.
+ // Optional. Default value is gin.defaultLogFormatter.
+ Formatter LogFormatter
+
+ // Output is a writer where logs are written.
+ // Optional. Default value is gin.DefaultWriter.
+ Output io.Writer
+
+ // SkipPaths is an array of URL paths for which logs are not written.
+ // Optional.
+ SkipPaths []string
+}
+
+// LogFormatter gives the signature of the formatter function passed to LoggerWithFormatter
+type LogFormatter func(params LogFormatterParams) string
+
+// LogFormatterParams is the structure any formatter will be handed when it is time to log.
+type LogFormatterParams struct {
+ Request *http.Request
+
+ // TimeStamp shows the time after the server returns a response.
+ TimeStamp time.Time
+ // StatusCode is HTTP response code.
+ StatusCode int
+ // Latency is how long the server took to process the request.
+ Latency time.Duration
+ // ClientIP is the result of Context's ClientIP method.
+ ClientIP string
+ // Method is the HTTP method given to the request.
+ Method string
+ // Path is the path the client requested.
+ Path string
+ // ErrorMessage is set if an error occurred while processing the request.
+ ErrorMessage string
+ // isTerm shows whether gin's output descriptor refers to a terminal.
+ isTerm bool
+ // BodySize is the size of the response body.
+ BodySize int
+ // Keys are the keys set on the request's context.
+ Keys map[string]interface{}
+}
+
+// StatusCodeColor is the ANSI color for appropriately logging http status code to a terminal.
+func (p *LogFormatterParams) StatusCodeColor() string {
+ code := p.StatusCode
+
+ switch {
+ case code >= http.StatusOK && code < http.StatusMultipleChoices:
+ return green
+ case code >= http.StatusMultipleChoices && code < http.StatusBadRequest:
+ return white
+ case code >= http.StatusBadRequest && code < http.StatusInternalServerError:
+ return yellow
+ default:
+ return red
+ }
+}
+
+// MethodColor is the ANSI color for appropriately logging http method to a terminal.
+func (p *LogFormatterParams) MethodColor() string {
+ method := p.Method
+
+ switch method {
+ case http.MethodGet:
+ return blue
+ case http.MethodPost:
+ return cyan
+ case http.MethodPut:
+ return yellow
+ case http.MethodDelete:
+ return red
+ case http.MethodPatch:
+ return green
+ case http.MethodHead:
+ return magenta
+ case http.MethodOptions:
+ return white
+ default:
+ return reset
+ }
+}
+
+// ResetColor resets all escape attributes.
+func (p *LogFormatterParams) ResetColor() string {
+ return reset
+}
+
+// IsOutputColor indicates whether colors can be output to the log.
+func (p *LogFormatterParams) IsOutputColor() bool {
+ return consoleColorMode == forceColor || (consoleColorMode == autoColor && p.isTerm)
+}
+
+// defaultLogFormatter is the default log format function Logger middleware uses.
+var defaultLogFormatter = func(param LogFormatterParams) string {
+ var statusColor, methodColor, resetColor string
+ if param.IsOutputColor() {
+ statusColor = param.StatusCodeColor()
+ methodColor = param.MethodColor()
+ resetColor = param.ResetColor()
+ }
+
+ if param.Latency > time.Minute {
+ // Truncate in a golang < 1.8 safe way
+ param.Latency = param.Latency - param.Latency%time.Second
+ }
+ return fmt.Sprintf("[GIN] %v |%s %3d %s| %13v | %15s |%s %-7s %s %#v\n%s",
+ param.TimeStamp.Format("2006/01/02 - 15:04:05"),
+ statusColor, param.StatusCode, resetColor,
+ param.Latency,
+ param.ClientIP,
+ methodColor, param.Method, resetColor,
+ param.Path,
+ param.ErrorMessage,
+ )
+}
+
+// DisableConsoleColor disables color output in the console.
+func DisableConsoleColor() {
+ consoleColorMode = disableColor
+}
+
+// ForceConsoleColor forces color output in the console.
+func ForceConsoleColor() {
+ consoleColorMode = forceColor
+}
+
+// ErrorLogger returns a handlerfunc for any error type.
+func ErrorLogger() HandlerFunc {
+ return ErrorLoggerT(ErrorTypeAny)
+}
+
+// ErrorLoggerT returns a handlerfunc for a given error type.
+func ErrorLoggerT(typ ErrorType) HandlerFunc {
+ return func(c *Context) {
+ c.Next()
+ errors := c.Errors.ByType(typ)
+ if len(errors) > 0 {
+ c.JSON(-1, errors)
+ }
+ }
+}
+
+// Logger instances a Logger middleware that will write the logs to gin.DefaultWriter.
+// By default gin.DefaultWriter = os.Stdout.
+func Logger() HandlerFunc {
+ return LoggerWithConfig(LoggerConfig{})
+}
+
+// LoggerWithFormatter instances a Logger middleware with the specified log format function.
+func LoggerWithFormatter(f LogFormatter) HandlerFunc {
+ return LoggerWithConfig(LoggerConfig{
+ Formatter: f,
+ })
+}
+
+// LoggerWithWriter instances a Logger middleware with the specified writer buffer.
+// Example: os.Stdout, a file opened in write mode, a socket...
+func LoggerWithWriter(out io.Writer, notlogged ...string) HandlerFunc {
+ return LoggerWithConfig(LoggerConfig{
+ Output: out,
+ SkipPaths: notlogged,
+ })
+}
+
+// LoggerWithConfig instances a Logger middleware with config.
+func LoggerWithConfig(conf LoggerConfig) HandlerFunc {
+ formatter := conf.Formatter
+ if formatter == nil {
+ formatter = defaultLogFormatter
+ }
+
+ out := conf.Output
+ if out == nil {
+ out = DefaultWriter
+ }
+
+ notlogged := conf.SkipPaths
+
+ isTerm := true
+
+ if w, ok := out.(*os.File); !ok || os.Getenv("TERM") == "dumb" ||
+ (!isatty.IsTerminal(w.Fd()) && !isatty.IsCygwinTerminal(w.Fd())) {
+ isTerm = false
+ }
+
+ var skip map[string]struct{}
+
+ if length := len(notlogged); length > 0 {
+ skip = make(map[string]struct{}, length)
+
+ for _, path := range notlogged {
+ skip[path] = struct{}{}
+ }
+ }
+
+ return func(c *Context) {
+ // Start timer
+ start := time.Now()
+ path := c.Request.URL.Path
+ raw := c.Request.URL.RawQuery
+
+ // Process request
+ c.Next()
+
+ // Log only when path is not being skipped
+ if _, ok := skip[path]; !ok {
+ param := LogFormatterParams{
+ Request: c.Request,
+ isTerm: isTerm,
+ Keys: c.Keys,
+ }
+
+ // Stop timer
+ param.TimeStamp = time.Now()
+ param.Latency = param.TimeStamp.Sub(start)
+
+ param.ClientIP = c.ClientIP()
+ param.Method = c.Request.Method
+ param.StatusCode = c.Writer.Status()
+ param.ErrorMessage = c.Errors.ByType(ErrorTypePrivate).String()
+
+ param.BodySize = c.Writer.Size()
+
+ if raw != "" {
+ path = path + "?" + raw
+ }
+
+ param.Path = path
+
+ fmt.Fprint(out, formatter(param))
+ }
+ }
+}
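+
+// Usage sketch: skip a noisy endpoint and use a compact custom format
+// (the /healthz path is an assumed health-check route, for illustration):
+//
+//	router := gin.New()
+//	router.Use(gin.LoggerWithConfig(gin.LoggerConfig{
+//		SkipPaths: []string{"/healthz"},
+//		Formatter: func(param gin.LogFormatterParams) string {
+//			return fmt.Sprintf("%s %s -> %d (%v)\n",
+//				param.Method, param.Path, param.StatusCode, param.Latency)
+//		},
+//	}))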
diff --git a/vendor/github.com/gin-gonic/gin/mode.go b/vendor/github.com/gin-gonic/gin/mode.go
new file mode 100644
index 0000000..c8813af
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/mode.go
@@ -0,0 +1,92 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "io"
+ "os"
+
+ "github.com/gin-gonic/gin/binding"
+)
+
+// EnvGinMode indicates environment name for gin mode.
+const EnvGinMode = "GIN_MODE"
+
+const (
+ // DebugMode indicates gin mode is debug.
+ DebugMode = "debug"
+ // ReleaseMode indicates gin mode is release.
+ ReleaseMode = "release"
+ // TestMode indicates gin mode is test.
+ TestMode = "test"
+)
+
+const (
+ debugCode = iota
+ releaseCode
+ testCode
+)
+
+// DefaultWriter is the default io.Writer used by Gin for debug output and
+// middleware output like Logger() or Recovery().
+// Note that both Logger and Recovery provide custom ways to configure their
+// output io.Writer.
+// To support coloring in Windows use:
+//
+//	import "github.com/mattn/go-colorable"
+//	gin.DefaultWriter = colorable.NewColorableStdout()
+var DefaultWriter io.Writer = os.Stdout
+
+// DefaultErrorWriter is the default io.Writer used by Gin to debug errors
+var DefaultErrorWriter io.Writer = os.Stderr
+
+var ginMode = debugCode
+var modeName = DebugMode
+
+func init() {
+ mode := os.Getenv(EnvGinMode)
+ SetMode(mode)
+}
+
+// SetMode sets gin mode according to input string.
+func SetMode(value string) {
+ if value == "" {
+ value = DebugMode
+ }
+
+ switch value {
+ case DebugMode:
+ ginMode = debugCode
+ case ReleaseMode:
+ ginMode = releaseCode
+ case TestMode:
+ ginMode = testCode
+ default:
+ panic("gin mode unknown: " + value + " (available mode: debug release test)")
+ }
+
+ modeName = value
+}
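+
+// Usage sketch: release mode disables the debug output; it can be set via
+// the GIN_MODE environment variable or explicitly before building the engine:
+//
+//	gin.SetMode(gin.ReleaseMode)
+//	router := gin.Default()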
+
+// DisableBindValidation closes the default validator.
+func DisableBindValidation() {
+ binding.Validator = nil
+}
+
+// EnableJsonDecoderUseNumber sets binding.EnableDecoderUseNumber to true to
+// call the UseNumber method on the JSON Decoder instance.
+func EnableJsonDecoderUseNumber() {
+ binding.EnableDecoderUseNumber = true
+}
+
+// EnableJsonDecoderDisallowUnknownFields sets binding.EnableDecoderDisallowUnknownFields to true to
+// call the DisallowUnknownFields method on the JSON Decoder instance.
+func EnableJsonDecoderDisallowUnknownFields() {
+ binding.EnableDecoderDisallowUnknownFields = true
+}
+
+// Mode returns the current gin mode.
+func Mode() string {
+ return modeName
+}
diff --git a/vendor/github.com/gin-gonic/gin/path.go b/vendor/github.com/gin-gonic/gin/path.go
new file mode 100644
index 0000000..d42d6b9
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/path.go
@@ -0,0 +1,150 @@
+// Copyright 2013 Julien Schmidt. All rights reserved.
+// Based on the path package, Copyright 2009 The Go Authors.
+// Use of this source code is governed by a BSD-style license that can be found
+// at https://github.com/julienschmidt/httprouter/blob/master/LICENSE.
+
+package gin
+
+// cleanPath is the URL version of path.Clean, it returns a canonical URL path
+// for p, eliminating . and .. elements.
+//
+// The following rules are applied iteratively until no further processing can
+// be done:
+// 1. Replace multiple slashes with a single slash.
+// 2. Eliminate each . path name element (the current directory).
+// 3. Eliminate each inner .. path name element (the parent directory)
+// along with the non-.. element that precedes it.
+// 4. Eliminate .. elements that begin a rooted path:
+// that is, replace "/.." by "/" at the beginning of a path.
+//
+// If the result of this process is an empty string, "/" is returned.
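+//
+// For example, cleanPath("//a/b/../c") returns "/a/c", and a trailing slash
+// is preserved: cleanPath("/a/b/") returns "/a/b/".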
+func cleanPath(p string) string {
+ const stackBufSize = 128
+ // Turn empty string into "/"
+ if p == "" {
+ return "/"
+ }
+
+ // Reasonably sized buffer on stack to avoid allocations in the common case.
+ // If a larger buffer is required, it gets allocated dynamically.
+ buf := make([]byte, 0, stackBufSize)
+
+ n := len(p)
+
+ // Invariants:
+ // reading from path; r is index of next byte to process.
+ // writing to buf; w is index of next byte to write.
+
+ // path must start with '/'
+ r := 1
+ w := 1
+
+ if p[0] != '/' {
+ r = 0
+
+ if n+1 > stackBufSize {
+ buf = make([]byte, n+1)
+ } else {
+ buf = buf[:n+1]
+ }
+ buf[0] = '/'
+ }
+
+ trailing := n > 1 && p[n-1] == '/'
+
+ // A bit more clunky without a 'lazybuf' like the path package, but the loop
+ // gets completely inlined (bufApp calls).
+ // So in contrast to the path package this loop has no expensive function
+ // calls (except make, if needed).
+
+ for r < n {
+ switch {
+ case p[r] == '/':
+ // empty path element, trailing slash is added after the end
+ r++
+
+ case p[r] == '.' && r+1 == n:
+ trailing = true
+ r++
+
+ case p[r] == '.' && p[r+1] == '/':
+ // . element
+ r += 2
+
+ case p[r] == '.' && p[r+1] == '.' && (r+2 == n || p[r+2] == '/'):
+ // .. element: remove to last /
+ r += 3
+
+ if w > 1 {
+ // can backtrack
+ w--
+
+ if len(buf) == 0 {
+ for w > 1 && p[w] != '/' {
+ w--
+ }
+ } else {
+ for w > 1 && buf[w] != '/' {
+ w--
+ }
+ }
+ }
+
+ default:
+ // Real path element.
+ // Add slash if needed
+ if w > 1 {
+ bufApp(&buf, p, w, '/')
+ w++
+ }
+
+ // Copy element
+ for r < n && p[r] != '/' {
+ bufApp(&buf, p, w, p[r])
+ w++
+ r++
+ }
+ }
+ }
+
+ // Re-append trailing slash
+ if trailing && w > 1 {
+ bufApp(&buf, p, w, '/')
+ w++
+ }
+
+ // If the original string was not modified (or only shortened at the end),
+ // return the respective substring of the original string.
+ // Otherwise return a new string from the buffer.
+ if len(buf) == 0 {
+ return p[:w]
+ }
+ return string(buf[:w])
+}
+
+// Internal helper to lazily create a buffer if necessary.
+// Calls to this function get inlined.
+func bufApp(buf *[]byte, s string, w int, c byte) {
+ b := *buf
+ if len(b) == 0 {
+ // No modification of the original string so far.
+ // If the next character is the same as in the original string, we do
+ // not yet have to allocate a buffer.
+ if s[w] == c {
+ return
+ }
+
+ // Otherwise use either the stack buffer, if it is large enough, or
+ // allocate a new buffer on the heap, and copy all previous characters.
+ length := len(s)
+ if length > cap(b) {
+ *buf = make([]byte, length)
+ } else {
+ *buf = (*buf)[:length]
+ }
+ b = *buf
+
+ copy(b, s[:w])
+ }
+ b[w] = c
+}
diff --git a/vendor/github.com/gin-gonic/gin/recovery.go b/vendor/github.com/gin-gonic/gin/recovery.go
new file mode 100644
index 0000000..563f5aa
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/recovery.go
@@ -0,0 +1,171 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "os"
+ "runtime"
+ "strings"
+ "time"
+)
+
+var (
+ dunno = []byte("???")
+ centerDot = []byte("·")
+ dot = []byte(".")
+ slash = []byte("/")
+)
+
+// RecoveryFunc defines the function passable to CustomRecovery.
+type RecoveryFunc func(c *Context, err interface{})
+
+// Recovery returns a middleware that recovers from any panics and writes a 500 if there was one.
+func Recovery() HandlerFunc {
+ return RecoveryWithWriter(DefaultErrorWriter)
+}
+
+// CustomRecovery returns a middleware that recovers from any panics and calls the provided handle func to handle it.
+func CustomRecovery(handle RecoveryFunc) HandlerFunc {
+ return RecoveryWithWriter(DefaultErrorWriter, handle)
+}
+
+// RecoveryWithWriter returns a middleware for a given writer that recovers from any panics and writes a 500 if there was one.
+func RecoveryWithWriter(out io.Writer, recovery ...RecoveryFunc) HandlerFunc {
+ if len(recovery) > 0 {
+ return CustomRecoveryWithWriter(out, recovery[0])
+ }
+ return CustomRecoveryWithWriter(out, defaultHandleRecovery)
+}
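+
+// Usage sketch: turning a panic into a controlled JSON response instead of
+// the bare 500 that the default handler writes:
+//
+//	router := gin.New()
+//	router.Use(gin.CustomRecovery(func(c *gin.Context, err interface{}) {
+//		c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "internal error"})
+//	}))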
+
+// CustomRecoveryWithWriter returns a middleware for a given writer that recovers from any panics and calls the provided handle func to handle it.
+func CustomRecoveryWithWriter(out io.Writer, handle RecoveryFunc) HandlerFunc {
+ var logger *log.Logger
+ if out != nil {
+ logger = log.New(out, "\n\n\x1b[31m", log.LstdFlags)
+ }
+ return func(c *Context) {
+ defer func() {
+ if err := recover(); err != nil {
+ // Check for a broken connection, as it is not really a
+ // condition that warrants a panic stack trace.
+ var brokenPipe bool
+ if ne, ok := err.(*net.OpError); ok {
+ if se, ok := ne.Err.(*os.SyscallError); ok {
+ if strings.Contains(strings.ToLower(se.Error()), "broken pipe") || strings.Contains(strings.ToLower(se.Error()), "connection reset by peer") {
+ brokenPipe = true
+ }
+ }
+ }
+ if logger != nil {
+ stack := stack(3)
+ httpRequest, _ := httputil.DumpRequest(c.Request, false)
+ headers := strings.Split(string(httpRequest), "\r\n")
+ for idx, header := range headers {
+ current := strings.Split(header, ":")
+ if current[0] == "Authorization" {
+ headers[idx] = current[0] + ": *"
+ }
+ }
+ headersToStr := strings.Join(headers, "\r\n")
+ if brokenPipe {
+ logger.Printf("%s\n%s%s", err, headersToStr, reset)
+ } else if IsDebugging() {
+ logger.Printf("[Recovery] %s panic recovered:\n%s\n%s\n%s%s",
+ timeFormat(time.Now()), headersToStr, err, stack, reset)
+ } else {
+ logger.Printf("[Recovery] %s panic recovered:\n%s\n%s%s",
+ timeFormat(time.Now()), err, stack, reset)
+ }
+ }
+ if brokenPipe {
+ // If the connection is dead, we can't write a status to it.
+ c.Error(err.(error)) // nolint: errcheck
+ c.Abort()
+ } else {
+ handle(c, err)
+ }
+ }
+ }()
+ c.Next()
+ }
+}
+
+func defaultHandleRecovery(c *Context, err interface{}) {
+ c.AbortWithStatus(http.StatusInternalServerError)
+}
+
+// stack returns a nicely formatted stack frame, skipping skip frames.
+func stack(skip int) []byte {
+ buf := new(bytes.Buffer) // the returned data
+ // As we loop, we open files and read them. These variables record the currently
+ // loaded file.
+ var lines [][]byte
+ var lastFile string
+ for i := skip; ; i++ { // Skip the expected number of frames
+ pc, file, line, ok := runtime.Caller(i)
+ if !ok {
+ break
+ }
+ // Print this much at least. If we can't find the source, it won't show.
+ fmt.Fprintf(buf, "%s:%d (0x%x)\n", file, line, pc)
+ if file != lastFile {
+ data, err := ioutil.ReadFile(file)
+ if err != nil {
+ continue
+ }
+ lines = bytes.Split(data, []byte{'\n'})
+ lastFile = file
+ }
+ fmt.Fprintf(buf, "\t%s: %s\n", function(pc), source(lines, line))
+ }
+ return buf.Bytes()
+}
+
+// source returns a space-trimmed slice of the n'th line.
+func source(lines [][]byte, n int) []byte {
+ n-- // in stack trace, lines are 1-indexed but our array is 0-indexed
+ if n < 0 || n >= len(lines) {
+ return dunno
+ }
+ return bytes.TrimSpace(lines[n])
+}
+
+// function returns, if possible, the name of the function containing the PC.
+func function(pc uintptr) []byte {
+ fn := runtime.FuncForPC(pc)
+ if fn == nil {
+ return dunno
+ }
+ name := []byte(fn.Name())
+ // The name includes the path name to the package, which is unnecessary
+ // since the file name is already included. Plus, it has center dots.
+ // That is, we see
+ // runtime/debug.*T·ptrmethod
+ // and want
+ // *T.ptrmethod
+ // Also the package path might contain dots (e.g. code.google.com/...),
+ // so first eliminate the path prefix
+ if lastSlash := bytes.LastIndex(name, slash); lastSlash >= 0 {
+ name = name[lastSlash+1:]
+ }
+ if period := bytes.Index(name, dot); period >= 0 {
+ name = name[period+1:]
+ }
+ name = bytes.Replace(name, centerDot, dot, -1)
+ return name
+}
+
+func timeFormat(t time.Time) string {
+ timeString := t.Format("2006/01/02 - 15:04:05")
+ return timeString
+}
diff --git a/vendor/github.com/gin-gonic/gin/render/data.go b/vendor/github.com/gin-gonic/gin/render/data.go
new file mode 100644
index 0000000..6ba657b
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/render/data.go
@@ -0,0 +1,25 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package render
+
+import "net/http"
+
+// Data contains ContentType and bytes data.
+type Data struct {
+ ContentType string
+ Data []byte
+}
+
+// Render (Data) writes data with custom ContentType.
+func (r Data) Render(w http.ResponseWriter) (err error) {
+ r.WriteContentType(w)
+ _, err = w.Write(r.Data)
+ return
+}
+
+// WriteContentType (Data) writes custom ContentType.
+func (r Data) WriteContentType(w http.ResponseWriter) {
+ writeContentType(w, []string{r.ContentType})
+}
diff --git a/vendor/github.com/gin-gonic/gin/render/html.go b/vendor/github.com/gin-gonic/gin/render/html.go
new file mode 100644
index 0000000..6696ece
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/render/html.go
@@ -0,0 +1,92 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package render
+
+import (
+ "html/template"
+ "net/http"
+)
+
+// Delims represents a set of Left and Right delimiters for HTML template rendering.
+type Delims struct {
+ // Left delimiter, defaults to {{.
+ Left string
+ // Right delimiter, defaults to }}.
+ Right string
+}
+
+// HTMLRender interface is to be implemented by HTMLProduction and HTMLDebug.
+type HTMLRender interface {
+ // Instance returns an HTML instance.
+ Instance(string, interface{}) Render
+}
+
+// HTMLProduction contains template reference and its delims.
+type HTMLProduction struct {
+ Template *template.Template
+ Delims Delims
+}
+
+// HTMLDebug contains the template delims, the glob pattern, the FuncMap and the file list.
+type HTMLDebug struct {
+ Files []string
+ Glob string
+ Delims Delims
+ FuncMap template.FuncMap
+}
+
+// HTML contains the template reference, its name and the given interface object.
+type HTML struct {
+ Template *template.Template
+ Name string
+ Data interface{}
+}
+
+var htmlContentType = []string{"text/html; charset=utf-8"}
+
+// Instance (HTMLProduction) returns an HTML instance that implements the Render interface.
+func (r HTMLProduction) Instance(name string, data interface{}) Render {
+ return HTML{
+ Template: r.Template,
+ Name: name,
+ Data: data,
+ }
+}
+
+// Instance (HTMLDebug) returns an HTML instance that implements the Render interface.
+func (r HTMLDebug) Instance(name string, data interface{}) Render {
+ return HTML{
+ Template: r.loadTemplate(),
+ Name: name,
+ Data: data,
+ }
+}
+
+func (r HTMLDebug) loadTemplate() *template.Template {
+ if r.FuncMap == nil {
+ r.FuncMap = template.FuncMap{}
+ }
+ if len(r.Files) > 0 {
+ return template.Must(template.New("").Delims(r.Delims.Left, r.Delims.Right).Funcs(r.FuncMap).ParseFiles(r.Files...))
+ }
+ if r.Glob != "" {
+ return template.Must(template.New("").Delims(r.Delims.Left, r.Delims.Right).Funcs(r.FuncMap).ParseGlob(r.Glob))
+ }
+ panic("the HTML debug render was created without files or glob pattern")
+}
+
+// Render (HTML) executes template and writes its result with custom ContentType for response.
+func (r HTML) Render(w http.ResponseWriter) error {
+ r.WriteContentType(w)
+
+ if r.Name == "" {
+ return r.Template.Execute(w, r.Data)
+ }
+ return r.Template.ExecuteTemplate(w, r.Name, r.Data)
+}
+
+// WriteContentType (HTML) writes HTML ContentType.
+func (r HTML) WriteContentType(w http.ResponseWriter) {
+ writeContentType(w, htmlContentType)
+}
diff --git a/vendor/github.com/gin-gonic/gin/render/json.go b/vendor/github.com/gin-gonic/gin/render/json.go
new file mode 100644
index 0000000..4186309
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/render/json.go
@@ -0,0 +1,193 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package render
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "net/http"
+
+ "github.com/gin-gonic/gin/internal/bytesconv"
+ "github.com/gin-gonic/gin/internal/json"
+)
+
+// JSON contains the given interface object.
+type JSON struct {
+ Data interface{}
+}
+
+// IndentedJSON contains the given interface object.
+type IndentedJSON struct {
+ Data interface{}
+}
+
+// SecureJSON contains the given interface object and its prefix.
+type SecureJSON struct {
+ Prefix string
+ Data interface{}
+}
+
+// JsonpJSON contains the given interface object and its callback.
+type JsonpJSON struct {
+ Callback string
+ Data interface{}
+}
+
+// AsciiJSON contains the given interface object.
+type AsciiJSON struct {
+ Data interface{}
+}
+
+// PureJSON contains the given interface object.
+type PureJSON struct {
+ Data interface{}
+}
+
+var jsonContentType = []string{"application/json; charset=utf-8"}
+var jsonpContentType = []string{"application/javascript; charset=utf-8"}
+var jsonAsciiContentType = []string{"application/json"}
+
+// Render (JSON) writes data with custom ContentType.
+func (r JSON) Render(w http.ResponseWriter) (err error) {
+ if err = WriteJSON(w, r.Data); err != nil {
+ panic(err)
+ }
+ return
+}
+
+// WriteContentType (JSON) writes JSON ContentType.
+func (r JSON) WriteContentType(w http.ResponseWriter) {
+ writeContentType(w, jsonContentType)
+}
+
+// WriteJSON marshals the given interface object and writes it with custom ContentType.
+func WriteJSON(w http.ResponseWriter, obj interface{}) error {
+ writeContentType(w, jsonContentType)
+ jsonBytes, err := json.Marshal(obj)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(jsonBytes)
+ return err
+}
+
+// Render (IndentedJSON) marshals the given interface object and writes it with custom ContentType.
+func (r IndentedJSON) Render(w http.ResponseWriter) error {
+ r.WriteContentType(w)
+ jsonBytes, err := json.MarshalIndent(r.Data, "", " ")
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(jsonBytes)
+ return err
+}
+
+// WriteContentType (IndentedJSON) writes JSON ContentType.
+func (r IndentedJSON) WriteContentType(w http.ResponseWriter) {
+ writeContentType(w, jsonContentType)
+}
+
+// Render (SecureJSON) marshals the given interface object and writes it with custom ContentType.
+func (r SecureJSON) Render(w http.ResponseWriter) error {
+ r.WriteContentType(w)
+ jsonBytes, err := json.Marshal(r.Data)
+ if err != nil {
+ return err
+ }
+ // if the marshalled JSON is an array, prepend the prefix to protect against JSON hijacking
+ if bytes.HasPrefix(jsonBytes, bytesconv.StringToBytes("[")) && bytes.HasSuffix(jsonBytes,
+ bytesconv.StringToBytes("]")) {
+ _, err = w.Write(bytesconv.StringToBytes(r.Prefix))
+ if err != nil {
+ return err
+ }
+ }
+ _, err = w.Write(jsonBytes)
+ return err
+}
+
+// WriteContentType (SecureJSON) writes JSON ContentType.
+func (r SecureJSON) WriteContentType(w http.ResponseWriter) {
+ writeContentType(w, jsonContentType)
+}
+
+// Render (JsonpJSON) marshals the given interface object and writes it and its callback with custom ContentType.
+func (r JsonpJSON) Render(w http.ResponseWriter) (err error) {
+ r.WriteContentType(w)
+ ret, err := json.Marshal(r.Data)
+ if err != nil {
+ return err
+ }
+
+ if r.Callback == "" {
+ _, err = w.Write(ret)
+ return err
+ }
+
+ callback := template.JSEscapeString(r.Callback)
+ _, err = w.Write(bytesconv.StringToBytes(callback))
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(bytesconv.StringToBytes("("))
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(ret)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(bytesconv.StringToBytes(");"))
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// WriteContentType (JsonpJSON) writes Javascript ContentType.
+func (r JsonpJSON) WriteContentType(w http.ResponseWriter) {
+ writeContentType(w, jsonpContentType)
+}
+
+// Render (AsciiJSON) marshals the given interface object and writes it with custom ContentType.
+func (r AsciiJSON) Render(w http.ResponseWriter) (err error) {
+ r.WriteContentType(w)
+ ret, err := json.Marshal(r.Data)
+ if err != nil {
+ return err
+ }
+
+ var buffer bytes.Buffer
+ for _, r := range bytesconv.BytesToString(ret) {
+ cvt := string(r)
+ if r >= 128 {
+ cvt = fmt.Sprintf("\\u%04x", int64(r))
+ }
+ buffer.WriteString(cvt)
+ }
+
+ _, err = w.Write(buffer.Bytes())
+ return err
+}
+
+// WriteContentType (AsciiJSON) writes JSON ContentType.
+func (r AsciiJSON) WriteContentType(w http.ResponseWriter) {
+ writeContentType(w, jsonAsciiContentType)
+}
+
+// Render (PureJSON) writes custom ContentType and encodes the given interface object.
+func (r PureJSON) Render(w http.ResponseWriter) error {
+ r.WriteContentType(w)
+ encoder := json.NewEncoder(w)
+ encoder.SetEscapeHTML(false)
+ return encoder.Encode(r.Data)
+}
+
+// WriteContentType (PureJSON) writes custom ContentType.
+func (r PureJSON) WriteContentType(w http.ResponseWriter) {
+ writeContentType(w, jsonContentType)
+}
diff --git a/vendor/github.com/gin-gonic/gin/render/msgpack.go b/vendor/github.com/gin-gonic/gin/render/msgpack.go
new file mode 100644
index 0000000..6ef5b6e
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/render/msgpack.go
@@ -0,0 +1,42 @@
+// Copyright 2017 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+//go:build !nomsgpack
+// +build !nomsgpack
+
+package render
+
+import (
+ "net/http"
+
+ "github.com/ugorji/go/codec"
+)
+
+var (
+ _ Render = MsgPack{}
+)
+
+// MsgPack contains the given interface object.
+type MsgPack struct {
+ Data interface{}
+}
+
+var msgpackContentType = []string{"application/msgpack; charset=utf-8"}
+
+// WriteContentType (MsgPack) writes MsgPack ContentType.
+func (r MsgPack) WriteContentType(w http.ResponseWriter) {
+ writeContentType(w, msgpackContentType)
+}
+
+// Render (MsgPack) encodes the given interface object and writes data with custom ContentType.
+func (r MsgPack) Render(w http.ResponseWriter) error {
+ return WriteMsgPack(w, r.Data)
+}
+
+// WriteMsgPack writes MsgPack ContentType and encodes the given interface object.
+func WriteMsgPack(w http.ResponseWriter, obj interface{}) error {
+ writeContentType(w, msgpackContentType)
+ var mh codec.MsgpackHandle
+ return codec.NewEncoder(w, &mh).Encode(obj)
+}
diff --git a/vendor/github.com/gin-gonic/gin/render/protobuf.go b/vendor/github.com/gin-gonic/gin/render/protobuf.go
new file mode 100644
index 0000000..15aca99
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/render/protobuf.go
@@ -0,0 +1,36 @@
+// Copyright 2018 Gin Core Team. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package render
+
+import (
+ "net/http"
+
+ "github.com/golang/protobuf/proto"
+)
+
+// ProtoBuf contains the given interface object.
+type ProtoBuf struct {
+ Data interface{}
+}
+
+var protobufContentType = []string{"application/x-protobuf"}
+
+// Render (ProtoBuf) marshals the given interface object and writes data with custom ContentType.
+func (r ProtoBuf) Render(w http.ResponseWriter) error {
+ r.WriteContentType(w)
+
+ bytes, err := proto.Marshal(r.Data.(proto.Message))
+ if err != nil {
+ return err
+ }
+
+ _, err = w.Write(bytes)
+ return err
+}
+
+// WriteContentType (ProtoBuf) writes ProtoBuf ContentType.
+func (r ProtoBuf) WriteContentType(w http.ResponseWriter) {
+ writeContentType(w, protobufContentType)
+}
diff --git a/vendor/github.com/gin-gonic/gin/render/reader.go b/vendor/github.com/gin-gonic/gin/render/reader.go
new file mode 100644
index 0000000..d5282e4
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/render/reader.go
@@ -0,0 +1,48 @@
+// Copyright 2018 Gin Core Team. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package render
+
+import (
+ "io"
+ "net/http"
+ "strconv"
+)
+
+// Reader contains an io.Reader, its length, and custom ContentType and other headers.
+type Reader struct {
+ ContentType string
+ ContentLength int64
+ Reader io.Reader
+ Headers map[string]string
+}
+
+// Render (Reader) writes data with custom ContentType and headers.
+func (r Reader) Render(w http.ResponseWriter) (err error) {
+ r.WriteContentType(w)
+ if r.ContentLength >= 0 {
+ if r.Headers == nil {
+ r.Headers = map[string]string{}
+ }
+ r.Headers["Content-Length"] = strconv.FormatInt(r.ContentLength, 10)
+ }
+ r.writeHeaders(w, r.Headers)
+ _, err = io.Copy(w, r.Reader)
+ return
+}
+
+// WriteContentType (Reader) writes custom ContentType.
+func (r Reader) WriteContentType(w http.ResponseWriter) {
+ writeContentType(w, []string{r.ContentType})
+}
+
+// writeHeaders writes custom Header.
+func (r Reader) writeHeaders(w http.ResponseWriter, headers map[string]string) {
+ header := w.Header()
+ for k, v := range headers {
+ if header.Get(k) == "" {
+ header.Set(k, v)
+ }
+ }
+}
diff --git a/vendor/github.com/gin-gonic/gin/render/redirect.go b/vendor/github.com/gin-gonic/gin/render/redirect.go
new file mode 100644
index 0000000..c006691
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/render/redirect.go
@@ -0,0 +1,29 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package render
+
+import (
+ "fmt"
+ "net/http"
+)
+
+// Redirect contains the http request reference, the redirect status code and the target location.
+type Redirect struct {
+ Code int
+ Request *http.Request
+ Location string
+}
+
+// Render (Redirect) redirects the http request to new location and writes redirect response.
+func (r Redirect) Render(w http.ResponseWriter) error {
+ if (r.Code < http.StatusMultipleChoices || r.Code > http.StatusPermanentRedirect) && r.Code != http.StatusCreated {
+ panic(fmt.Sprintf("Cannot redirect with status code %d", r.Code))
+ }
+ http.Redirect(w, r.Request, r.Location, r.Code)
+ return nil
+}
+
+// WriteContentType (Redirect) doesn't write any ContentType.
+func (r Redirect) WriteContentType(http.ResponseWriter) {}
diff --git a/vendor/github.com/gin-gonic/gin/render/render.go b/vendor/github.com/gin-gonic/gin/render/render.go
new file mode 100644
index 0000000..bcd568b
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/render/render.go
@@ -0,0 +1,40 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package render
+
+import "net/http"
+
+// Render interface is to be implemented by JSON, XML, HTML, YAML and so on.
+type Render interface {
+ // Render writes data with custom ContentType.
+ Render(http.ResponseWriter) error
+ // WriteContentType writes custom ContentType.
+ WriteContentType(w http.ResponseWriter)
+}
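+
+// Implementation sketch: any type with these two methods can be passed to
+// Context.Render. A minimal plain-text renderer (Banner is illustrative,
+// not part of the package):
+//
+//	type Banner struct{ Text string }
+//
+//	func (b Banner) Render(w http.ResponseWriter) error {
+//		b.WriteContentType(w)
+//		_, err := w.Write([]byte(b.Text))
+//		return err
+//	}
+//
+//	func (b Banner) WriteContentType(w http.ResponseWriter) {
+//		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+//	}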
+
+var (
+ _ Render = JSON{}
+ _ Render = IndentedJSON{}
+ _ Render = SecureJSON{}
+ _ Render = JsonpJSON{}
+ _ Render = XML{}
+ _ Render = String{}
+ _ Render = Redirect{}
+ _ Render = Data{}
+ _ Render = HTML{}
+ _ HTMLRender = HTMLDebug{}
+ _ HTMLRender = HTMLProduction{}
+ _ Render = YAML{}
+ _ Render = Reader{}
+ _ Render = AsciiJSON{}
+ _ Render = ProtoBuf{}
+)
+
+func writeContentType(w http.ResponseWriter, value []string) {
+ header := w.Header()
+ if val := header["Content-Type"]; len(val) == 0 {
+ header["Content-Type"] = value
+ }
+}
diff --git a/vendor/github.com/gin-gonic/gin/render/text.go b/vendor/github.com/gin-gonic/gin/render/text.go
new file mode 100644
index 0000000..461b720
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/render/text.go
@@ -0,0 +1,41 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package render
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/gin-gonic/gin/internal/bytesconv"
+)
+
+// String contains the given interface object slice and its format.
+type String struct {
+ Format string
+ Data []interface{}
+}
+
+var plainContentType = []string{"text/plain; charset=utf-8"}
+
+// Render (String) writes data with custom ContentType.
+func (r String) Render(w http.ResponseWriter) error {
+ return WriteString(w, r.Format, r.Data)
+}
+
+// WriteContentType (String) writes Plain ContentType.
+func (r String) WriteContentType(w http.ResponseWriter) {
+ writeContentType(w, plainContentType)
+}
+
+// WriteString writes data according to its format and writes custom ContentType.
+func WriteString(w http.ResponseWriter, format string, data []interface{}) (err error) {
+ writeContentType(w, plainContentType)
+ if len(data) > 0 {
+ _, err = fmt.Fprintf(w, format, data...)
+ return
+ }
+ _, err = w.Write(bytesconv.StringToBytes(format))
+ return
+}
diff --git a/vendor/github.com/gin-gonic/gin/render/xml.go b/vendor/github.com/gin-gonic/gin/render/xml.go
new file mode 100644
index 0000000..cc5390a
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/render/xml.go
@@ -0,0 +1,28 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package render
+
+import (
+ "encoding/xml"
+ "net/http"
+)
+
+// XML contains the given interface object.
+type XML struct {
+ Data interface{}
+}
+
+var xmlContentType = []string{"application/xml; charset=utf-8"}
+
+// Render (XML) encodes the given interface object and writes data with custom ContentType.
+func (r XML) Render(w http.ResponseWriter) error {
+ r.WriteContentType(w)
+ return xml.NewEncoder(w).Encode(r.Data)
+}
+
+// WriteContentType (XML) writes XML ContentType for response.
+func (r XML) WriteContentType(w http.ResponseWriter) {
+ writeContentType(w, xmlContentType)
+}
diff --git a/vendor/github.com/gin-gonic/gin/render/yaml.go b/vendor/github.com/gin-gonic/gin/render/yaml.go
new file mode 100644
index 0000000..0df7836
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/render/yaml.go
@@ -0,0 +1,36 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package render
+
+import (
+ "net/http"
+
+ "gopkg.in/yaml.v2"
+)
+
+// YAML contains the given interface object.
+type YAML struct {
+ Data interface{}
+}
+
+var yamlContentType = []string{"application/x-yaml; charset=utf-8"}
+
+// Render (YAML) marshals the given interface object and writes data with custom ContentType.
+func (r YAML) Render(w http.ResponseWriter) error {
+ r.WriteContentType(w)
+
+ bytes, err := yaml.Marshal(r.Data)
+ if err != nil {
+ return err
+ }
+
+ _, err = w.Write(bytes)
+ return err
+}
+
+// WriteContentType (YAML) writes YAML ContentType for response.
+func (r YAML) WriteContentType(w http.ResponseWriter) {
+ writeContentType(w, yamlContentType)
+}
diff --git a/vendor/github.com/gin-gonic/gin/response_writer.go b/vendor/github.com/gin-gonic/gin/response_writer.go
new file mode 100644
index 0000000..2682668
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/response_writer.go
@@ -0,0 +1,126 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+)
+
+const (
+ noWritten = -1
+ defaultStatus = http.StatusOK
+)
+
+// ResponseWriter is gin's interface over http.ResponseWriter; it tracks the
+// response status code and the number of bytes written.
+type ResponseWriter interface {
+ http.ResponseWriter
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+
+ // Returns the HTTP response status code of the current request.
+ Status() int
+
+ // Returns the number of bytes already written into the response http body.
+ // See Written()
+ Size() int
+
+ // Writes the string into the response body.
+ WriteString(string) (int, error)
+
+ // Returns true if the response body was already written.
+ Written() bool
+
+ // Forces to write the http header (status code + headers).
+ WriteHeaderNow()
+
+ // Returns the http.Pusher for server push, or nil if not supported.
+ Pusher() http.Pusher
+}
+
+type responseWriter struct {
+ http.ResponseWriter
+ size int
+ status int
+}
+
+var _ ResponseWriter = &responseWriter{}
+
+func (w *responseWriter) reset(writer http.ResponseWriter) {
+ w.ResponseWriter = writer
+ w.size = noWritten
+ w.status = defaultStatus
+}
+
+func (w *responseWriter) WriteHeader(code int) {
+ if code > 0 && w.status != code {
+ if w.Written() {
+ debugPrint("[WARNING] Headers were already written. Wanted to override status code %d with %d", w.status, code)
+ }
+ w.status = code
+ }
+}
+
+func (w *responseWriter) WriteHeaderNow() {
+ if !w.Written() {
+ w.size = 0
+ w.ResponseWriter.WriteHeader(w.status)
+ }
+}
+
+func (w *responseWriter) Write(data []byte) (n int, err error) {
+ w.WriteHeaderNow()
+ n, err = w.ResponseWriter.Write(data)
+ w.size += n
+ return
+}
+
+func (w *responseWriter) WriteString(s string) (n int, err error) {
+ w.WriteHeaderNow()
+ n, err = io.WriteString(w.ResponseWriter, s)
+ w.size += n
+ return
+}
+
+func (w *responseWriter) Status() int {
+ return w.status
+}
+
+func (w *responseWriter) Size() int {
+ return w.size
+}
+
+func (w *responseWriter) Written() bool {
+ return w.size != noWritten
+}
+
+// Hijack implements the http.Hijacker interface.
+func (w *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ if w.size < 0 {
+ w.size = 0
+ }
+ return w.ResponseWriter.(http.Hijacker).Hijack()
+}
+
+// CloseNotify implements the http.CloseNotifier interface.
+func (w *responseWriter) CloseNotify() <-chan bool {
+ return w.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+
+// Flush implements the http.Flusher interface.
+func (w *responseWriter) Flush() {
+ w.WriteHeaderNow()
+ w.ResponseWriter.(http.Flusher).Flush()
+}
+
+func (w *responseWriter) Pusher() (pusher http.Pusher) {
+ if pusher, ok := w.ResponseWriter.(http.Pusher); ok {
+ return pusher
+ }
+ return nil
+}
diff --git a/vendor/github.com/gin-gonic/gin/routergroup.go b/vendor/github.com/gin-gonic/gin/routergroup.go
new file mode 100644
index 0000000..15d9930
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/routergroup.go
@@ -0,0 +1,230 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "net/http"
+ "path"
+ "regexp"
+ "strings"
+)
+
+// IRouter defines all router handle interface includes single and group router.
+type IRouter interface {
+ IRoutes
+ Group(string, ...HandlerFunc) *RouterGroup
+}
+
+// IRoutes defines all router handle interface.
+type IRoutes interface {
+ Use(...HandlerFunc) IRoutes
+
+ Handle(string, string, ...HandlerFunc) IRoutes
+ Any(string, ...HandlerFunc) IRoutes
+ GET(string, ...HandlerFunc) IRoutes
+ POST(string, ...HandlerFunc) IRoutes
+ DELETE(string, ...HandlerFunc) IRoutes
+ PATCH(string, ...HandlerFunc) IRoutes
+ PUT(string, ...HandlerFunc) IRoutes
+ OPTIONS(string, ...HandlerFunc) IRoutes
+ HEAD(string, ...HandlerFunc) IRoutes
+
+ StaticFile(string, string) IRoutes
+ Static(string, string) IRoutes
+ StaticFS(string, http.FileSystem) IRoutes
+}
+
+// RouterGroup is used internally to configure the router. A RouterGroup is associated with
+// a prefix and an array of handlers (middleware).
+type RouterGroup struct {
+ Handlers HandlersChain
+ basePath string
+ engine *Engine
+ root bool
+}
+
+var _ IRouter = &RouterGroup{}
+
+// Use adds middleware to the group, see example code in GitHub.
+func (group *RouterGroup) Use(middleware ...HandlerFunc) IRoutes {
+ group.Handlers = append(group.Handlers, middleware...)
+ return group.returnObj()
+}
+
+// Group creates a new router group. You should add all the routes that have common middlewares or the same path prefix.
+// For example, all the routes that use a common middleware for authorization could be grouped.
+func (group *RouterGroup) Group(relativePath string, handlers ...HandlerFunc) *RouterGroup {
+ return &RouterGroup{
+ Handlers: group.combineHandlers(handlers),
+ basePath: group.calculateAbsolutePath(relativePath),
+ engine: group.engine,
+ }
+}
+
+// BasePath returns the base path of router group.
+// For example, if v := router.Group("/rest/n/v1/api"), v.BasePath() is "/rest/n/v1/api".
+func (group *RouterGroup) BasePath() string {
+ return group.basePath
+}
+
+func (group *RouterGroup) handle(httpMethod, relativePath string, handlers HandlersChain) IRoutes {
+ absolutePath := group.calculateAbsolutePath(relativePath)
+ handlers = group.combineHandlers(handlers)
+ group.engine.addRoute(httpMethod, absolutePath, handlers)
+ return group.returnObj()
+}
+
+// Handle registers a new request handle and middleware with the given path and method.
+// The last handler should be the real handler, the other ones should be middleware that can and should be shared among different routes.
+// See the example code in GitHub.
+//
+// For GET, POST, PUT, PATCH and DELETE requests the respective shortcut
+// functions can be used.
+//
+// This function is intended for bulk loading and to allow the usage of less
+// frequently used, non-standardized or custom methods (e.g. for internal
+// communication with a proxy).
+func (group *RouterGroup) Handle(httpMethod, relativePath string, handlers ...HandlerFunc) IRoutes {
+ if matches, err := regexp.MatchString("^[A-Z]+$", httpMethod); !matches || err != nil {
+ panic("http method " + httpMethod + " is not valid")
+ }
+ return group.handle(httpMethod, relativePath, handlers)
+}
+
+// POST is a shortcut for router.Handle("POST", path, handle).
+func (group *RouterGroup) POST(relativePath string, handlers ...HandlerFunc) IRoutes {
+ return group.handle(http.MethodPost, relativePath, handlers)
+}
+
+// GET is a shortcut for router.Handle("GET", path, handle).
+func (group *RouterGroup) GET(relativePath string, handlers ...HandlerFunc) IRoutes {
+ return group.handle(http.MethodGet, relativePath, handlers)
+}
+
+// DELETE is a shortcut for router.Handle("DELETE", path, handle).
+func (group *RouterGroup) DELETE(relativePath string, handlers ...HandlerFunc) IRoutes {
+ return group.handle(http.MethodDelete, relativePath, handlers)
+}
+
+// PATCH is a shortcut for router.Handle("PATCH", path, handle).
+func (group *RouterGroup) PATCH(relativePath string, handlers ...HandlerFunc) IRoutes {
+ return group.handle(http.MethodPatch, relativePath, handlers)
+}
+
+// PUT is a shortcut for router.Handle("PUT", path, handle).
+func (group *RouterGroup) PUT(relativePath string, handlers ...HandlerFunc) IRoutes {
+ return group.handle(http.MethodPut, relativePath, handlers)
+}
+
+// OPTIONS is a shortcut for router.Handle("OPTIONS", path, handle).
+func (group *RouterGroup) OPTIONS(relativePath string, handlers ...HandlerFunc) IRoutes {
+ return group.handle(http.MethodOptions, relativePath, handlers)
+}
+
+// HEAD is a shortcut for router.Handle("HEAD", path, handle).
+func (group *RouterGroup) HEAD(relativePath string, handlers ...HandlerFunc) IRoutes {
+ return group.handle(http.MethodHead, relativePath, handlers)
+}
+
+// Any registers a route that matches all the HTTP methods.
+// GET, POST, PUT, PATCH, HEAD, OPTIONS, DELETE, CONNECT, TRACE.
+func (group *RouterGroup) Any(relativePath string, handlers ...HandlerFunc) IRoutes {
+ group.handle(http.MethodGet, relativePath, handlers)
+ group.handle(http.MethodPost, relativePath, handlers)
+ group.handle(http.MethodPut, relativePath, handlers)
+ group.handle(http.MethodPatch, relativePath, handlers)
+ group.handle(http.MethodHead, relativePath, handlers)
+ group.handle(http.MethodOptions, relativePath, handlers)
+ group.handle(http.MethodDelete, relativePath, handlers)
+ group.handle(http.MethodConnect, relativePath, handlers)
+ group.handle(http.MethodTrace, relativePath, handlers)
+ return group.returnObj()
+}
+
+// StaticFile registers a single route in order to serve a single file of the local filesystem.
+// router.StaticFile("favicon.ico", "./resources/favicon.ico")
+func (group *RouterGroup) StaticFile(relativePath, filepath string) IRoutes {
+ if strings.Contains(relativePath, ":") || strings.Contains(relativePath, "*") {
+ panic("URL parameters can not be used when serving a static file")
+ }
+ handler := func(c *Context) {
+ c.File(filepath)
+ }
+ group.GET(relativePath, handler)
+ group.HEAD(relativePath, handler)
+ return group.returnObj()
+}
+
+// Static serves files from the given file system root.
+// Internally an http.FileServer is used, therefore http.NotFound is used instead
+// of the Router's NotFound handler.
+// To use the operating system's file system implementation, use:
+// router.Static("/static", "/var/www")
+func (group *RouterGroup) Static(relativePath, root string) IRoutes {
+ return group.StaticFS(relativePath, Dir(root, false))
+}
+
+// StaticFS works just like `Static()` but a custom `http.FileSystem` can be used instead.
+// Gin uses gin.Dir() by default.
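+// For example:
+// router.StaticFS("/more_static", http.Dir("my_file_system"))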
+func (group *RouterGroup) StaticFS(relativePath string, fs http.FileSystem) IRoutes {
+ if strings.Contains(relativePath, ":") || strings.Contains(relativePath, "*") {
+ panic("URL parameters can not be used when serving a static folder")
+ }
+ handler := group.createStaticHandler(relativePath, fs)
+ urlPattern := path.Join(relativePath, "/*filepath")
+
+ // Register GET and HEAD handlers
+ group.GET(urlPattern, handler)
+ group.HEAD(urlPattern, handler)
+ return group.returnObj()
+}
+
+func (group *RouterGroup) createStaticHandler(relativePath string, fs http.FileSystem) HandlerFunc {
+ absolutePath := group.calculateAbsolutePath(relativePath)
+ fileServer := http.StripPrefix(absolutePath, http.FileServer(fs))
+
+ return func(c *Context) {
+ if _, noListing := fs.(*onlyFilesFS); noListing {
+ c.Writer.WriteHeader(http.StatusNotFound)
+ }
+
+ file := c.Param("filepath")
+ // Check if file exists and/or if we have permission to access it
+ f, err := fs.Open(file)
+ if err != nil {
+ c.Writer.WriteHeader(http.StatusNotFound)
+ c.handlers = group.engine.noRoute
+ // Reset index
+ c.index = -1
+ return
+ }
+ f.Close()
+
+ fileServer.ServeHTTP(c.Writer, c.Request)
+ }
+}
+
+func (group *RouterGroup) combineHandlers(handlers HandlersChain) HandlersChain {
+ finalSize := len(group.Handlers) + len(handlers)
+ if finalSize >= int(abortIndex) {
+ panic("too many handlers")
+ }
+ mergedHandlers := make(HandlersChain, finalSize)
+ copy(mergedHandlers, group.Handlers)
+ copy(mergedHandlers[len(group.Handlers):], handlers)
+ return mergedHandlers
+}
+
+func (group *RouterGroup) calculateAbsolutePath(relativePath string) string {
+ return joinPaths(group.basePath, relativePath)
+}
+
+func (group *RouterGroup) returnObj() IRoutes {
+ if group.root {
+ return group.engine
+ }
+ return group
+}
diff --git a/vendor/github.com/gin-gonic/gin/test_helpers.go b/vendor/github.com/gin-gonic/gin/test_helpers.go
new file mode 100644
index 0000000..3a7a5dd
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/test_helpers.go
@@ -0,0 +1,16 @@
+// Copyright 2017 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import "net/http"
+
+// CreateTestContext returns a fresh engine and context for testing purposes
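+//
+// A minimal usage sketch (assuming net/http/httptest):
+//
+//	w := httptest.NewRecorder()
+//	c, _ := gin.CreateTestContext(w)
+//	c.String(http.StatusOK, "pong")
+//	// w.Body now contains "pong"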
+func CreateTestContext(w http.ResponseWriter) (c *Context, r *Engine) {
+ r = New()
+ c = r.allocateContext()
+ c.reset()
+ c.writermem.reset(w)
+ return
+}
diff --git a/vendor/github.com/gin-gonic/gin/tree.go b/vendor/github.com/gin-gonic/gin/tree.go
new file mode 100644
index 0000000..ca753e6
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/tree.go
@@ -0,0 +1,777 @@
+// Copyright 2013 Julien Schmidt. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be found
+// at https://github.com/julienschmidt/httprouter/blob/master/LICENSE
+
+package gin
+
+import (
+ "bytes"
+ "net/url"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/gin-gonic/gin/internal/bytesconv"
+)
+
+var (
+ strColon = []byte(":")
+ strStar = []byte("*")
+)
+
+// Param is a single URL parameter, consisting of a key and a value.
+type Param struct {
+ Key string
+ Value string
+}
+
+// Params is a Param-slice, as returned by the router.
+// The slice is ordered, the first URL parameter is also the first slice value.
+// It is therefore safe to read values by the index.
+type Params []Param
+
+// Get returns the value of the first Param which key matches the given name.
+// If no matching Param is found, an empty string and false are returned.
+func (ps Params) Get(name string) (string, bool) {
+ for _, entry := range ps {
+ if entry.Key == name {
+ return entry.Value, true
+ }
+ }
+ return "", false
+}
+
+// ByName returns the value of the first Param which key matches the given name.
+// If no matching Param is found, an empty string is returned.
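+// For example, with a route registered as "/user/:id" and a request to
+// "/user/42", ps.ByName("id") returns "42".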
+func (ps Params) ByName(name string) (va string) {
+ va, _ = ps.Get(name)
+ return
+}
+
+type methodTree struct {
+ method string
+ root *node
+}
+
+type methodTrees []methodTree
+
+func (trees methodTrees) get(method string) *node {
+ for _, tree := range trees {
+ if tree.method == method {
+ return tree.root
+ }
+ }
+ return nil
+}
+
+func min(a, b int) int {
+ if a <= b {
+ return a
+ }
+ return b
+}
+
+func longestCommonPrefix(a, b string) int {
+ i := 0
+ max := min(len(a), len(b))
+ for i < max && a[i] == b[i] {
+ i++
+ }
+ return i
+}
+
+// addChild will add a child node, keeping wildcards at the end
+func (n *node) addChild(child *node) {
+ if n.wildChild && len(n.children) > 0 {
+ wildcardChild := n.children[len(n.children)-1]
+ n.children = append(n.children[:len(n.children)-1], child, wildcardChild)
+ } else {
+ n.children = append(n.children, child)
+ }
+}
+
+func countParams(path string) uint16 {
+ var n uint16
+ s := bytesconv.StringToBytes(path)
+ n += uint16(bytes.Count(s, strColon))
+ n += uint16(bytes.Count(s, strStar))
+ return n
+}
+
+type nodeType uint8
+
+const (
+ static nodeType = iota // default
+ root
+ param
+ catchAll
+)
+
+type node struct {
+ path string
+ indices string
+ wildChild bool
+ nType nodeType
+ priority uint32
+ children []*node // child nodes, at most 1 :param style node at the end of the array
+ handlers HandlersChain
+ fullPath string
+}
+
+// Increments priority of the given child and reorders if necessary
+func (n *node) incrementChildPrio(pos int) int {
+ cs := n.children
+ cs[pos].priority++
+ prio := cs[pos].priority
+
+ // Adjust position (move to front)
+ newPos := pos
+ for ; newPos > 0 && cs[newPos-1].priority < prio; newPos-- {
+ // Swap node positions
+ cs[newPos-1], cs[newPos] = cs[newPos], cs[newPos-1]
+ }
+
+ // Build new index char string
+ if newPos != pos {
+ n.indices = n.indices[:newPos] + // Unchanged prefix, might be empty
+ n.indices[pos:pos+1] + // The index char we move
+ n.indices[newPos:pos] + n.indices[pos+1:] // Rest without char at 'pos'
+ }
+
+ return newPos
+}
+
+// addRoute adds a node with the given handle to the path.
+// Not concurrency-safe!
+func (n *node) addRoute(path string, handlers HandlersChain) {
+ fullPath := path
+ n.priority++
+
+ // Empty tree
+ if len(n.path) == 0 && len(n.children) == 0 {
+ n.insertChild(path, fullPath, handlers)
+ n.nType = root
+ return
+ }
+
+ parentFullPathIndex := 0
+
+walk:
+ for {
+ // Find the longest common prefix.
+ // This also implies that the common prefix contains no ':' or '*'
+ // since the existing key can't contain those chars.
+ i := longestCommonPrefix(path, n.path)
+
+ // Split edge
+ if i < len(n.path) {
+ child := node{
+ path: n.path[i:],
+ wildChild: n.wildChild,
+ indices: n.indices,
+ children: n.children,
+ handlers: n.handlers,
+ priority: n.priority - 1,
+ fullPath: n.fullPath,
+ }
+
+ n.children = []*node{&child}
+ // []byte for proper unicode char conversion, see #65
+ n.indices = bytesconv.BytesToString([]byte{n.path[i]})
+ n.path = path[:i]
+ n.handlers = nil
+ n.wildChild = false
+ n.fullPath = fullPath[:parentFullPathIndex+i]
+ }
+
+ // Make new node a child of this node
+ if i < len(path) {
+ path = path[i:]
+ c := path[0]
+
+ // '/' after param
+ if n.nType == param && c == '/' && len(n.children) == 1 {
+ parentFullPathIndex += len(n.path)
+ n = n.children[0]
+ n.priority++
+ continue walk
+ }
+
+ // Check if a child with the next path byte exists
+ for i, max := 0, len(n.indices); i < max; i++ {
+ if c == n.indices[i] {
+ parentFullPathIndex += len(n.path)
+ i = n.incrementChildPrio(i)
+ n = n.children[i]
+ continue walk
+ }
+ }
+
+ // Otherwise insert it
+ if c != ':' && c != '*' && n.nType != catchAll {
+ // []byte for proper unicode char conversion, see #65
+ n.indices += bytesconv.BytesToString([]byte{c})
+ child := &node{
+ fullPath: fullPath,
+ }
+ n.addChild(child)
+ n.incrementChildPrio(len(n.indices) - 1)
+ n = child
+ } else if n.wildChild {
+ // inserting a wildcard node, need to check if it conflicts with the existing wildcard
+ n = n.children[len(n.children)-1]
+ n.priority++
+
+ // Check if the wildcard matches
+ if len(path) >= len(n.path) && n.path == path[:len(n.path)] &&
+ // Adding a child to a catchAll is not possible
+ n.nType != catchAll &&
+ // Check for longer wildcard, e.g. :name and :names
+ (len(n.path) >= len(path) || path[len(n.path)] == '/') {
+ continue walk
+ }
+
+ // Wildcard conflict
+ pathSeg := path
+ if n.nType != catchAll {
+ pathSeg = strings.SplitN(pathSeg, "/", 2)[0]
+ }
+ prefix := fullPath[:strings.Index(fullPath, pathSeg)] + n.path
+ panic("'" + pathSeg +
+ "' in new path '" + fullPath +
+ "' conflicts with existing wildcard '" + n.path +
+ "' in existing prefix '" + prefix +
+ "'")
+ }
+
+ n.insertChild(path, fullPath, handlers)
+ return
+ }
+
+ // Otherwise add handle to current node
+ if n.handlers != nil {
+ panic("handlers are already registered for path '" + fullPath + "'")
+ }
+ n.handlers = handlers
+ n.fullPath = fullPath
+ return
+ }
+}
+
+// Search for a wildcard segment and check the name for invalid characters.
+// Returns -1 as index if no wildcard was found.
+func findWildcard(path string) (wildcard string, i int, valid bool) {
+ // Find start
+ for start, c := range []byte(path) {
+ // A wildcard starts with ':' (param) or '*' (catch-all)
+ if c != ':' && c != '*' {
+ continue
+ }
+
+ // Find end and check for invalid characters
+ valid = true
+ for end, c := range []byte(path[start+1:]) {
+ switch c {
+ case '/':
+ return path[start : start+1+end], start, valid
+ case ':', '*':
+ valid = false
+ }
+ }
+ return path[start:], start, valid
+ }
+ return "", -1, false
+}
+
+func (n *node) insertChild(path string, fullPath string, handlers HandlersChain) {
+ for {
+ // Find prefix until first wildcard
+ wildcard, i, valid := findWildcard(path)
+ if i < 0 { // No wildcard found
+ break
+ }
+
+ // The wildcard name must not contain ':' or '*'
+ if !valid {
+ panic("only one wildcard per path segment is allowed, has: '" +
+ wildcard + "' in path '" + fullPath + "'")
+ }
+
+ // check if the wildcard has a name
+ if len(wildcard) < 2 {
+ panic("wildcards must be named with a non-empty name in path '" + fullPath + "'")
+ }
+
+ if wildcard[0] == ':' { // param
+ if i > 0 {
+ // Insert prefix before the current wildcard
+ n.path = path[:i]
+ path = path[i:]
+ }
+
+ child := &node{
+ nType: param,
+ path: wildcard,
+ fullPath: fullPath,
+ }
+ n.addChild(child)
+ n.wildChild = true
+ n = child
+ n.priority++
+
+ // if the path doesn't end with the wildcard, then there
+ // will be another non-wildcard subpath starting with '/'
+ if len(wildcard) < len(path) {
+ path = path[len(wildcard):]
+
+ child := &node{
+ priority: 1,
+ fullPath: fullPath,
+ }
+ n.addChild(child)
+ n = child
+ continue
+ }
+
+ // Otherwise we're done. Insert the handle in the new leaf
+ n.handlers = handlers
+ return
+ }
+
+ // catchAll
+ if i+len(wildcard) != len(path) {
+ panic("catch-all routes are only allowed at the end of the path in path '" + fullPath + "'")
+ }
+
+ if len(n.path) > 0 && n.path[len(n.path)-1] == '/' {
+ panic("catch-all conflicts with existing handle for the path segment root in path '" + fullPath + "'")
+ }
+
+ // currently fixed width 1 for '/'
+ i--
+ if path[i] != '/' {
+ panic("no / before catch-all in path '" + fullPath + "'")
+ }
+
+ n.path = path[:i]
+
+ // First node: catchAll node with empty path
+ child := &node{
+ wildChild: true,
+ nType: catchAll,
+ fullPath: fullPath,
+ }
+
+ n.addChild(child)
+ n.indices = string('/')
+ n = child
+ n.priority++
+
+ // second node: node holding the variable
+ child = &node{
+ path: path[i:],
+ nType: catchAll,
+ handlers: handlers,
+ priority: 1,
+ fullPath: fullPath,
+ }
+ n.children = []*node{child}
+
+ return
+ }
+
+ // If no wildcard was found, simply insert the path and handle
+ n.path = path
+ n.handlers = handlers
+ n.fullPath = fullPath
+}
+
+// nodeValue holds return values of (*node).getValue method
+type nodeValue struct {
+ handlers HandlersChain
+ params *Params
+ tsr bool
+ fullPath string
+}
+
+// getValue returns the handle registered with the given path (key). The values
+// of wildcards are saved to the given Params slice.
+// If no handle can be found, a TSR (trailing slash redirect) recommendation is
+// made if a handle exists with an extra trailing slash (or without the
+// trailing slash) for the given path.
+func (n *node) getValue(path string, params *Params, unescape bool) (value nodeValue) {
+walk: // Outer loop for walking the tree
+ for {
+ prefix := n.path
+ if len(path) > len(prefix) {
+ if path[:len(prefix)] == prefix {
+ path = path[len(prefix):]
+
+ // Try all the non-wildcard children first by matching the indices
+ idxc := path[0]
+ for i, c := range []byte(n.indices) {
+ if c == idxc {
+ n = n.children[i]
+ continue walk
+ }
+ }
+
+ // If there is no wildcard pattern, recommend a redirection
+ if !n.wildChild {
+ // Nothing found.
+ // We can recommend to redirect to the same URL without a
+ // trailing slash if a leaf exists for that path.
+ value.tsr = (path == "/" && n.handlers != nil)
+ return
+ }
+
+ // Handle wildcard child, which is always at the end of the array
+ n = n.children[len(n.children)-1]
+
+ switch n.nType {
+ case param:
+ // Find param end (either '/' or path end)
+ end := 0
+ for end < len(path) && path[end] != '/' {
+ end++
+ }
+
+ // Save param value
+ if params != nil {
+ if value.params == nil {
+ value.params = params
+ }
+ // Expand slice within preallocated capacity
+ i := len(*value.params)
+ *value.params = (*value.params)[:i+1]
+ val := path[:end]
+ if unescape {
+ if v, err := url.QueryUnescape(val); err == nil {
+ val = v
+ }
+ }
+ (*value.params)[i] = Param{
+ Key: n.path[1:],
+ Value: val,
+ }
+ }
+
+ // we need to go deeper!
+ if end < len(path) {
+ if len(n.children) > 0 {
+ path = path[end:]
+ n = n.children[0]
+ continue walk
+ }
+
+ // ... but we can't
+ value.tsr = (len(path) == end+1)
+ return
+ }
+
+ if value.handlers = n.handlers; value.handlers != nil {
+ value.fullPath = n.fullPath
+ return
+ }
+ if len(n.children) == 1 {
+ // No handle found. Check if a handle for this path + a
+ // trailing slash exists for TSR recommendation
+ n = n.children[0]
+ value.tsr = (n.path == "/" && n.handlers != nil)
+ }
+ return
+
+ case catchAll:
+ // Save param value
+ if params != nil {
+ if value.params == nil {
+ value.params = params
+ }
+ // Expand slice within preallocated capacity
+ i := len(*value.params)
+ *value.params = (*value.params)[:i+1]
+ val := path
+ if unescape {
+ if v, err := url.QueryUnescape(path); err == nil {
+ val = v
+ }
+ }
+ (*value.params)[i] = Param{
+ Key: n.path[2:],
+ Value: val,
+ }
+ }
+
+ value.handlers = n.handlers
+ value.fullPath = n.fullPath
+ return
+
+ default:
+ panic("invalid node type")
+ }
+ }
+ }
+
+ if path == prefix {
+ // We should have reached the node containing the handle.
+ // Check if this node has a handle registered.
+ if value.handlers = n.handlers; value.handlers != nil {
+ value.fullPath = n.fullPath
+ return
+ }
+
+ // If there is no handle for this route, but this route has a
+ // wildcard child, there must be a handle for this path with an
+ // additional trailing slash
+ if path == "/" && n.wildChild && n.nType != root {
+ value.tsr = true
+ return
+ }
+
+ // No handle found. Check if a handle for this path + a
+ // trailing slash exists for trailing slash recommendation
+ for i, c := range []byte(n.indices) {
+ if c == '/' {
+ n = n.children[i]
+ value.tsr = (len(n.path) == 1 && n.handlers != nil) ||
+ (n.nType == catchAll && n.children[0].handlers != nil)
+ return
+ }
+ }
+
+ return
+ }
+
+ // Nothing found. We can recommend to redirect to the same URL with an
+ // extra trailing slash if a leaf exists for that path
+ value.tsr = (path == "/") ||
+ (len(prefix) == len(path)+1 && prefix[len(path)] == '/' &&
+ path == prefix[:len(prefix)-1] && n.handlers != nil)
+ return
+ }
+}
+
+// Makes a case-insensitive lookup of the given path and tries to find a handler.
+// It can optionally also fix trailing slashes.
+// It returns the case-corrected path and a bool indicating whether the lookup
+// was successful.
+func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) ([]byte, bool) {
+ const stackBufSize = 128
+
+ // Use a static sized buffer on the stack in the common case.
+ // If the path is too long, allocate a buffer on the heap instead.
+ buf := make([]byte, 0, stackBufSize)
+ if length := len(path) + 1; length > stackBufSize {
+ buf = make([]byte, 0, length)
+ }
+
+ ciPath := n.findCaseInsensitivePathRec(
+ path,
+ buf, // Preallocate enough memory for new path
+ [4]byte{}, // Empty rune buffer
+ fixTrailingSlash,
+ )
+
+ return ciPath, ciPath != nil
+}
+
+// Shift bytes in array by n bytes left
+func shiftNRuneBytes(rb [4]byte, n int) [4]byte {
+ switch n {
+ case 0:
+ return rb
+ case 1:
+ return [4]byte{rb[1], rb[2], rb[3], 0}
+ case 2:
+ return [4]byte{rb[2], rb[3]}
+ case 3:
+ return [4]byte{rb[3]}
+ default:
+ return [4]byte{}
+ }
+}
+
+// Recursive case-insensitive lookup function used by n.findCaseInsensitivePath
+func (n *node) findCaseInsensitivePathRec(path string, ciPath []byte, rb [4]byte, fixTrailingSlash bool) []byte {
+ npLen := len(n.path)
+
+walk: // Outer loop for walking the tree
+ for len(path) >= npLen && (npLen == 0 || strings.EqualFold(path[1:npLen], n.path[1:])) {
+ // Add common prefix to result
+ oldPath := path
+ path = path[npLen:]
+ ciPath = append(ciPath, n.path...)
+
+ if len(path) == 0 {
+ // We should have reached the node containing the handle.
+ // Check if this node has a handle registered.
+ if n.handlers != nil {
+ return ciPath
+ }
+
+ // No handle found.
+ // Try to fix the path by adding a trailing slash
+ if fixTrailingSlash {
+ for i, c := range []byte(n.indices) {
+ if c == '/' {
+ n = n.children[i]
+ if (len(n.path) == 1 && n.handlers != nil) ||
+ (n.nType == catchAll && n.children[0].handlers != nil) {
+ return append(ciPath, '/')
+ }
+ return nil
+ }
+ }
+ }
+ return nil
+ }
+
+ // If this node does not have a wildcard (param or catchAll) child,
+ // we can just look up the next child node and continue to walk down
+ // the tree
+ if !n.wildChild {
+ // Skip rune bytes already processed
+ rb = shiftNRuneBytes(rb, npLen)
+
+ if rb[0] != 0 {
+ // Old rune not finished
+ idxc := rb[0]
+ for i, c := range []byte(n.indices) {
+ if c == idxc {
+ // continue with child node
+ n = n.children[i]
+ npLen = len(n.path)
+ continue walk
+ }
+ }
+ } else {
+ // Process a new rune
+ var rv rune
+
+ // Find rune start.
+ // Runes are up to 4 byte long,
+ // -4 would definitely be another rune.
+ var off int
+ for max := min(npLen, 3); off < max; off++ {
+ if i := npLen - off; utf8.RuneStart(oldPath[i]) {
+ // read rune from cached path
+ rv, _ = utf8.DecodeRuneInString(oldPath[i:])
+ break
+ }
+ }
+
+ // Calculate lowercase bytes of current rune
+ lo := unicode.ToLower(rv)
+ utf8.EncodeRune(rb[:], lo)
+
+ // Skip already processed bytes
+ rb = shiftNRuneBytes(rb, off)
+
+ idxc := rb[0]
+ for i, c := range []byte(n.indices) {
+ // Lowercase matches
+ if c == idxc {
+ // must use a recursive approach since both the
+ // uppercase byte and the lowercase byte might exist
+ // as an index
+ if out := n.children[i].findCaseInsensitivePathRec(
+ path, ciPath, rb, fixTrailingSlash,
+ ); out != nil {
+ return out
+ }
+ break
+ }
+ }
+
+ // If we found no match, the same for the uppercase rune,
+ // if it differs
+ if up := unicode.ToUpper(rv); up != lo {
+ utf8.EncodeRune(rb[:], up)
+ rb = shiftNRuneBytes(rb, off)
+
+ idxc := rb[0]
+ for i, c := range []byte(n.indices) {
+ // Uppercase matches
+ if c == idxc {
+ // Continue with child node
+ n = n.children[i]
+ npLen = len(n.path)
+ continue walk
+ }
+ }
+ }
+ }
+
+ // Nothing found. We can recommend to redirect to the same URL
+ // without a trailing slash if a leaf exists for that path
+ if fixTrailingSlash && path == "/" && n.handlers != nil {
+ return ciPath
+ }
+ return nil
+ }
+
+ n = n.children[0]
+ switch n.nType {
+ case param:
+ // Find param end (either '/' or path end)
+ end := 0
+ for end < len(path) && path[end] != '/' {
+ end++
+ }
+
+ // Add param value to case insensitive path
+ ciPath = append(ciPath, path[:end]...)
+
+ // We need to go deeper!
+ if end < len(path) {
+ if len(n.children) > 0 {
+ // Continue with child node
+ n = n.children[0]
+ npLen = len(n.path)
+ path = path[end:]
+ continue
+ }
+
+ // ... but we can't
+ if fixTrailingSlash && len(path) == end+1 {
+ return ciPath
+ }
+ return nil
+ }
+
+ if n.handlers != nil {
+ return ciPath
+ }
+
+ if fixTrailingSlash && len(n.children) == 1 {
+ // No handle found. Check if a handle for this path + a
+ // trailing slash exists
+ n = n.children[0]
+ if n.path == "/" && n.handlers != nil {
+ return append(ciPath, '/')
+ }
+ }
+
+ return nil
+
+ case catchAll:
+ return append(ciPath, path...)
+
+ default:
+ panic("invalid node type")
+ }
+ }
+
+ // Nothing found.
+ // Try to fix the path by adding / removing a trailing slash
+ if fixTrailingSlash {
+ if path == "/" {
+ return ciPath
+ }
+ if len(path)+1 == npLen && n.path[len(path)] == '/' &&
+ strings.EqualFold(path[1:], n.path[1:len(path)]) && n.handlers != nil {
+ return append(ciPath, n.path...)
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/gin-gonic/gin/utils.go b/vendor/github.com/gin-gonic/gin/utils.go
new file mode 100644
index 0000000..c32f0ee
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/utils.go
@@ -0,0 +1,153 @@
+// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+import (
+ "encoding/xml"
+ "net/http"
+ "os"
+ "path"
+ "reflect"
+ "runtime"
+ "strings"
+)
+
+// BindKey indicates a default bind key.
+const BindKey = "_gin-gonic/gin/bindkey"
+
+// Bind is a helper function that takes an interface object and returns a Gin middleware binding requests to a new instance of it.
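+//
+// A minimal usage sketch (hypothetical LoginForm struct):
+//
+//	r.POST("/login", gin.Bind(LoginForm{}), func(c *gin.Context) {
+//		form := c.MustGet(gin.BindKey).(*LoginForm) // set only when binding succeeded
+//		_ = form
+//	})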
+func Bind(val interface{}) HandlerFunc {
+ value := reflect.ValueOf(val)
+ if value.Kind() == reflect.Ptr {
+ panic(`Bind struct can not be a pointer. Example:
+ Use: gin.Bind(Struct{}) instead of gin.Bind(&Struct{})
+`)
+ }
+ typ := value.Type()
+
+ return func(c *Context) {
+ obj := reflect.New(typ).Interface()
+ if c.Bind(obj) == nil {
+ c.Set(BindKey, obj)
+ }
+ }
+}
+
+// WrapF is a helper function for wrapping http.HandlerFunc and returns a Gin middleware.
+func WrapF(f http.HandlerFunc) HandlerFunc {
+ return func(c *Context) {
+ f(c.Writer, c.Request)
+ }
+}
+
+// WrapH is a helper function for wrapping http.Handler and returns a Gin middleware.
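+//
+// For example, mounting a standard library file server (a sketch):
+//
+//	r.GET("/static/*filepath", gin.WrapH(http.StripPrefix("/static/", http.FileServer(http.Dir("./public")))))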
+func WrapH(h http.Handler) HandlerFunc {
+ return func(c *Context) {
+ h.ServeHTTP(c.Writer, c.Request)
+ }
+}
+
+// H is a shortcut for map[string]interface{}
+type H map[string]interface{}
+
+// MarshalXML allows type H to be used with xml.Marshal.
+func (h H) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+ start.Name = xml.Name{
+ Space: "",
+ Local: "map",
+ }
+ if err := e.EncodeToken(start); err != nil {
+ return err
+ }
+ for key, value := range h {
+ elem := xml.StartElement{
+ Name: xml.Name{Space: "", Local: key},
+ Attr: []xml.Attr{},
+ }
+ if err := e.EncodeElement(value, elem); err != nil {
+ return err
+ }
+ }
+
+ return e.EncodeToken(xml.EndElement{Name: start.Name})
+}
+
+func assert1(guard bool, text string) {
+ if !guard {
+ panic(text)
+ }
+}
+
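+// filterFlags returns the content-type stripped of any flags,
+// e.g. filterFlags("text/html; charset=utf-8") == "text/html".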
+func filterFlags(content string) string {
+ for i, char := range content {
+ if char == ' ' || char == ';' {
+ return content[:i]
+ }
+ }
+ return content
+}
+
+func chooseData(custom, wildcard interface{}) interface{} {
+ if custom != nil {
+ return custom
+ }
+ if wildcard != nil {
+ return wildcard
+ }
+ panic("negotiation config is invalid")
+}
+
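+// parseAccept splits an Accept header into its media types, dropping quality
+// parameters, e.g. "text/html, application/json;q=0.9" yields
+// []string{"text/html", "application/json"}.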
+func parseAccept(acceptHeader string) []string {
+ parts := strings.Split(acceptHeader, ",")
+ out := make([]string, 0, len(parts))
+ for _, part := range parts {
+ if i := strings.IndexByte(part, ';'); i > 0 {
+ part = part[:i]
+ }
+ if part = strings.TrimSpace(part); part != "" {
+ out = append(out, part)
+ }
+ }
+ return out
+}
+
+func lastChar(str string) uint8 {
+ if str == "" {
+ panic("The length of the string can't be 0")
+ }
+ return str[len(str)-1]
+}
+
+func nameOfFunction(f interface{}) string {
+ return runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name()
+}
+
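+// joinPaths joins absolutePath and relativePath, preserving a single trailing
+// slash, e.g. joinPaths("/api", "v1/") == "/api/v1/".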
+func joinPaths(absolutePath, relativePath string) string {
+ if relativePath == "" {
+ return absolutePath
+ }
+
+ finalPath := path.Join(absolutePath, relativePath)
+ if lastChar(relativePath) == '/' && lastChar(finalPath) != '/' {
+ return finalPath + "/"
+ }
+ return finalPath
+}
+
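+// resolveAddress picks the address to listen on: the explicit address when one
+// is passed, otherwise ":" + $PORT when set, falling back to ":8080".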
+func resolveAddress(addr []string) string {
+ switch len(addr) {
+ case 0:
+ if port := os.Getenv("PORT"); port != "" {
+ debugPrint("Environment variable PORT=\"%s\"", port)
+ return ":" + port
+ }
+ debugPrint("Environment variable PORT is undefined. Using port :8080 by default")
+ return ":8080"
+ case 1:
+ return addr[0]
+ default:
+ panic("too many parameters")
+ }
+}
diff --git a/vendor/github.com/gin-gonic/gin/version.go b/vendor/github.com/gin-gonic/gin/version.go
new file mode 100644
index 0000000..3647461
--- /dev/null
+++ b/vendor/github.com/gin-gonic/gin/version.go
@@ -0,0 +1,8 @@
+// Copyright 2018 Gin Core Team. All rights reserved.
+// Use of this source code is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package gin
+
+// Version is the current gin framework's version.
+const Version = "v1.7.1"
diff --git a/vendor/github.com/go-playground/form/.gitignore b/vendor/github.com/go-playground/form/.gitignore
new file mode 100644
index 0000000..4b0b7d2
--- /dev/null
+++ b/vendor/github.com/go-playground/form/.gitignore
@@ -0,0 +1,26 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+old.txt
+new.txt
\ No newline at end of file
diff --git a/vendor/github.com/go-playground/form/LICENSE b/vendor/github.com/go-playground/form/LICENSE
new file mode 100644
index 0000000..8d8aba1
--- /dev/null
+++ b/vendor/github.com/go-playground/form/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Go Playground
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/go-playground/form/README.md b/vendor/github.com/go-playground/form/README.md
new file mode 100644
index 0000000..a1cd515
--- /dev/null
+++ b/vendor/github.com/go-playground/form/README.md
@@ -0,0 +1,333 @@
+Package form
+============
+![Project status](https://img.shields.io/badge/version-3.1.4-green.svg)
+[![Build Status](https://semaphoreci.com/api/v1/joeybloggs/form/branches/master/badge.svg)](https://semaphoreci.com/joeybloggs/form)
+[![Coverage Status](https://coveralls.io/repos/github/go-playground/form/badge.svg?branch=master)](https://coveralls.io/github/go-playground/form?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/form)](https://goreportcard.com/report/github.com/go-playground/form)
+[![GoDoc](https://godoc.org/github.com/go-playground/form?status.svg)](https://godoc.org/github.com/go-playground/form)
+![License](https://img.shields.io/dub/l/vibe-d.svg)
+[![Gitter](https://badges.gitter.im/go-playground/form.svg)](https://gitter.im/go-playground/form?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
+
+Package form decodes url.Values into Go value(s) and encodes Go value(s) into url.Values.
+
+It has the following features:
+
+- Supports map of almost all types.
+- Supports both Numbered and Normal arrays eg. `"Array[0]"` and just `"Array"` with multiple values passed.
+- Slice honours the specified index. eg. if "Slice[2]" is the only Slice value passed down, it will be put at index 2; if slice isn't big enough it will be expanded.
+- Array honours the specified index. eg. if "Array[2]" is the only Array value passed down, it will be put at index 2; if array isn't big enough a warning will be printed and value ignored.
+- Only creates objects as necessary eg. if no `array` or `map` values are passed down, the `array` and `map` are left as their default values in the struct.
+- Allows for Custom Type registration.
+- Handles time.Time using RFC3339 time format by default, but can easily be changed by registering a Custom Type, see below.
+- Handles Encoding & Decoding of almost all Go types eg. can Decode into struct, array, map, int... and Encode a struct, array, map, int...
+
+Common Questions
+
+- Does it support encoding.TextUnmarshaler? No, because TextUnmarshaler only accepts []byte but posted values can have multiple values, so it is not suitable.
+- Mixing `array/slice` with `array[idx]/slice[idx]`, in which order are they parsed? `array/slice` then `array[idx]/slice[idx]`
+
+Supported Types ( out of the box )
+----------
+
+* `string`
+* `bool`
+* `int`, `int8`, `int16`, `int32`, `int64`
+* `uint`, `uint8`, `uint16`, `uint32`, `uint64`
+* `float32`, `float64`
+* `struct` and `anonymous struct`
+* `interface{}`
+* `time.Time` - by default using RFC3339
+* a `pointer` to one of the above types
+* `slice`, `array`
+* `map`
+* `custom types` can override any of the above types
+* many other types may be supported inherently
+
+**NOTE**: `map`, `struct` and `slice` nesting is supported ad infinitum.
+
+Installation
+------------
+
+Use go get.
+
+ go get -u github.com/go-playground/form
+
+Then import the form package into your own code.
+
+ import "github.com/go-playground/form"
+
+Usage
+-----
+
+- Use symbol `.` for separating fields/structs. (eg. `structfield.field`)
+- Use `[index or key]` for access to index of a slice/array or key for map. (eg. `arrayfield[0]`, `mapfield[keyvalue]`)
+
+For example, an HTML form using this naming scheme might look like the
+following (an illustrative sketch matching the Decoding example below):
+
+```html
+<form method="POST">
+  <input type="text" name="Name" value="joeybloggs"/>
+  <input type="text" name="Age" value="3"/>
+  <input type="text" name="Address[0].Name" value="26 Here Blvd."/>
+  <input type="text" name="MapExample[key]" value="value"/>
+</form>
+```
+
+Examples
+-------
+
+Decoding
+```go
+package main
+
+import (
+ "fmt"
+ "log"
+ "net/url"
+
+ "github.com/go-playground/form"
+)
+
+// Address contains address information
+type Address struct {
+ Name string
+ Phone string
+}
+
+// User contains user information
+type User struct {
+ Name string
+ Age uint8
+ Gender string
+ Address []Address
+ Active bool `form:"active"`
+ MapExample map[string]string
+ NestedMap map[string]map[string]string
+ NestedArray [][]string
+}
+
+// use a single instance of Decoder, it caches struct info
+var decoder *form.Decoder
+
+func main() {
+ decoder = form.NewDecoder()
+
+ // this simulates the results of http.Request's ParseForm() function
+ values := parseForm()
+
+ var user User
+
+ // must pass a pointer
+ err := decoder.Decode(&user, values)
+ if err != nil {
+ log.Panic(err)
+ }
+
+ fmt.Printf("%#v\n", user)
+}
+
+// this simulates the results of http.Request's ParseForm() function
+func parseForm() url.Values {
+ return url.Values{
+ "Name": []string{"joeybloggs"},
+ "Age": []string{"3"},
+ "Gender": []string{"Male"},
+ "Address[0].Name": []string{"26 Here Blvd."},
+ "Address[0].Phone": []string{"9(999)999-9999"},
+ "Address[1].Name": []string{"26 There Blvd."},
+ "Address[1].Phone": []string{"1(111)111-1111"},
+ "active": []string{"true"},
+ "MapExample[key]": []string{"value"},
+ "NestedMap[key][key]": []string{"value"},
+ "NestedArray[0][0]": []string{"value"},
+ }
+}
+```
+
+Encoding
+```go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/go-playground/form"
+)
+
+// Address contains address information
+type Address struct {
+ Name string
+ Phone string
+}
+
+// User contains user information
+type User struct {
+ Name string
+ Age uint8
+ Gender string
+ Address []Address
+ Active bool `form:"active"`
+ MapExample map[string]string
+ NestedMap map[string]map[string]string
+ NestedArray [][]string
+}
+
+// use a single instance of Encoder, it caches struct info
+var encoder *form.Encoder
+
+func main() {
+ encoder = form.NewEncoder()
+
+ user := User{
+ Name: "joeybloggs",
+ Age: 3,
+ Gender: "Male",
+ Address: []Address{
+ {Name: "26 Here Blvd.", Phone: "9(999)999-9999"},
+ {Name: "26 There Blvd.", Phone: "1(111)111-1111"},
+ },
+ Active: true,
+ MapExample: map[string]string{"key": "value"},
+ NestedMap: map[string]map[string]string{"key": {"key": "value"}},
+ NestedArray: [][]string{{"value"}},
+ }
+
+ // must pass a pointer
+ values, err := encoder.Encode(&user)
+ if err != nil {
+ log.Panic(err)
+ }
+
+ fmt.Printf("%#v\n", values)
+}
+```
+
+Registering Custom Types
+--------------
+
+Decoder
+```go
+decoder.RegisterCustomTypeFunc(func(vals []string) (interface{}, error) {
+ return time.Parse("2006-01-02", vals[0])
+ }, time.Time{})
+```
+ADDITIONAL: if a struct type is registered, the function will only be called if a url.Value exists for
+the struct itself and not just for its fields. eg. url.Values{"User":"Name%3Djoeybloggs"} will call the
+custom type function with 'User' as the type, however url.Values{"User.Name":"joeybloggs"} will not.
+
+
+Encoder
+```go
+encoder.RegisterCustomTypeFunc(func(x interface{}) ([]string, error) {
+ return []string{x.(time.Time).Format("2006-01-02")}, nil
+ }, time.Time{})
+```
+
+Ignoring Fields
+--------------
+You can tell form to ignore fields using `-` in the tag
+```go
+type MyStruct struct {
+ Field string `form:"-"`
+}
+```
+
+Omitempty
+--------------
+You can tell form to omit empty fields using `,omitempty` or `FieldName,omitempty` in the tag
+```go
+type MyStruct struct {
+ Field string `form:",omitempty"`
+ Field2 string `form:"CustomFieldName,omitempty"`
+}
+```
+
+Notes
+------
+To maximize compatibility with other systems the Encoder attempts
+to avoid using array indexes in url.Values if at all possible.
+
+eg.
+```go
+// A struct field of
+Field []string{"1", "2", "3"}
+
+// will be output in url.Values as
+"Field": []string{"1", "2", "3"}
+
+// and not
+"Field[0]": []string{"1"}
+"Field[1]": []string{"2"}
+"Field[2]": []string{"3"}
+
+// however there are times when it is unavoidable, like with pointers
+i := int(1)
+Field []*int{nil, nil, &i}
+
+// to avoid encoding the nil values at index 0 and 1, the index must be used
+"Field[2]": []string{"1"}
+```
+
+Benchmarks
+------
+###### Run on MacBook Pro (15-inch, 2017) using go version go1.10.1 darwin/amd64
+
+NOTE: the 1 allocation and B/op in the first 4 decodes is actually the struct being allocated when it is passed in, so primitives are effectively zero-allocation.
+
+```go
+go test -run=NONE -bench=. -benchmem=true
+goos: darwin
+goarch: amd64
+pkg: github.com/go-playground/form/benchmarks
+
+BenchmarkSimpleUserDecodeStruct-8 5000000 236 ns/op 64 B/op 1 allocs/op
+BenchmarkSimpleUserDecodeStructParallel-8 20000000 82.1 ns/op 64 B/op 1 allocs/op
+BenchmarkSimpleUserEncodeStruct-8 2000000 627 ns/op 485 B/op 10 allocs/op
+BenchmarkSimpleUserEncodeStructParallel-8 10000000 223 ns/op 485 B/op 10 allocs/op
+BenchmarkPrimitivesDecodeStructAllPrimitivesTypes-8 2000000 724 ns/op 96 B/op 1 allocs/op
+BenchmarkPrimitivesDecodeStructAllPrimitivesTypesParallel-8 10000000 246 ns/op 96 B/op 1 allocs/op
+BenchmarkPrimitivesEncodeStructAllPrimitivesTypes-8 500000 3187 ns/op 2977 B/op 36 allocs/op
+BenchmarkPrimitivesEncodeStructAllPrimitivesTypesParallel-8 1000000 1106 ns/op 2977 B/op 36 allocs/op
+BenchmarkComplexArrayDecodeStructAllTypes-8 100000 13748 ns/op 2248 B/op 121 allocs/op
+BenchmarkComplexArrayDecodeStructAllTypesParallel-8 500000 4313 ns/op 2249 B/op 121 allocs/op
+BenchmarkComplexArrayEncodeStructAllTypes-8 200000 10758 ns/op 7113 B/op 104 allocs/op
+BenchmarkComplexArrayEncodeStructAllTypesParallel-8 500000 3532 ns/op 7113 B/op 104 allocs/op
+BenchmarkComplexMapDecodeStructAllTypes-8 100000 17644 ns/op 5305 B/op 130 allocs/op
+BenchmarkComplexMapDecodeStructAllTypesParallel-8 300000 5470 ns/op 5308 B/op 130 allocs/op
+BenchmarkComplexMapEncodeStructAllTypes-8 200000 11155 ns/op 6971 B/op 129 allocs/op
+BenchmarkComplexMapEncodeStructAllTypesParallel-8 500000 3768 ns/op 6971 B/op 129 allocs/op
+BenchmarkDecodeNestedStruct-8 500000 2462 ns/op 384 B/op 14 allocs/op
+BenchmarkDecodeNestedStructParallel-8 2000000 814 ns/op 384 B/op 14 allocs/op
+BenchmarkEncodeNestedStruct-8 1000000 1483 ns/op 693 B/op 16 allocs/op
+BenchmarkEncodeNestedStructParallel-8 3000000 525 ns/op 693 B/op 16 allocs/op
+```
+
+Competitor benchmarks can be found [here](https://github.com/go-playground/form/blob/master/benchmarks/benchmarks.md)
+
+Complementary Software
+----------------------
+
+Here is a list of software that complements using this library post decoding.
+
+* [Validator](https://github.com/go-playground/validator) - Go Struct and Field validation, including Cross Field, Cross Struct, Map, Slice and Array diving.
+* [mold](https://github.com/go-playground/mold) - A general library to help modify or set data within data structures and other objects.
+
+Package Versioning
+----------
+I'm jumping on the vendoring bandwagon; you should vendor this package, as I will not
+be creating different versions with gopkg.in like a lot of my other libraries.
+
+Why? Because my time is spread pretty thin maintaining all of the libraries I have plus life;
+it is so freeing not to worry about it, and it will help me keep pouring out bigger and better
+things for you, the community.
+
+License
+------
+Distributed under MIT License, please see license file in code for more details.
diff --git a/vendor/github.com/go-playground/form/cache.go b/vendor/github.com/go-playground/form/cache.go
new file mode 100644
index 0000000..47e5fce
--- /dev/null
+++ b/vendor/github.com/go-playground/form/cache.go
@@ -0,0 +1,133 @@
+package form
+
+import (
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+type cacheFields []cachedField
+
+func (s cacheFields) Len() int {
+ return len(s)
+}
+
+func (s cacheFields) Less(i, j int) bool {
+ return !s[i].isAnonymous
+}
+
+func (s cacheFields) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+type cachedField struct {
+ idx int
+ name string
+ isAnonymous bool
+ isOmitEmpty bool
+}
+
+type cachedStruct struct {
+ fields cacheFields
+}
+
+type structCacheMap struct {
+ m atomic.Value // map[reflect.Type]*cachedStruct
+ lock sync.Mutex
+ tagFn TagNameFunc
+}
+
+// TagNameFunc allows for adding of a custom tag name parser
+type TagNameFunc func(field reflect.StructField) string
+
+func newStructCacheMap() *structCacheMap {
+
+ sc := new(structCacheMap)
+ sc.m.Store(make(map[reflect.Type]*cachedStruct))
+
+ return sc
+}
+
+func (s *structCacheMap) Get(key reflect.Type) (value *cachedStruct, ok bool) {
+ value, ok = s.m.Load().(map[reflect.Type]*cachedStruct)[key]
+ return
+}
+
+func (s *structCacheMap) Set(key reflect.Type, value *cachedStruct) {
+
+ m := s.m.Load().(map[reflect.Type]*cachedStruct)
+
+ nm := make(map[reflect.Type]*cachedStruct, len(m)+1)
+ for k, v := range m {
+ nm[k] = v
+ }
+ nm[key] = value
+ s.m.Store(nm)
+}
+
+func (s *structCacheMap) parseStruct(mode Mode, current reflect.Value, key reflect.Type, tagName string) *cachedStruct {
+
+ s.lock.Lock()
+
+ // multiple callers could have been trying to access this concurrently, but once
+ // the first one is done parsing, this check ensures the struct isn't parsed again.
+ cs, ok := s.Get(key)
+ if ok {
+ s.lock.Unlock()
+ return cs
+ }
+
+ typ := current.Type()
+ cs = &cachedStruct{fields: make([]cachedField, 0, 4)} // init with capacity 4, betting most structs being decoded into have at least 4 fields.
+
+ numFields := current.NumField()
+
+ var fld reflect.StructField
+ var name string
+ var idx int
+ var isOmitEmpty bool
+
+ for i := 0; i < numFields; i++ {
+ isOmitEmpty = false
+ fld = typ.Field(i)
+
+ if fld.PkgPath != blank && !fld.Anonymous {
+ continue
+ }
+
+ if s.tagFn != nil {
+ name = s.tagFn(fld)
+ } else {
+ name = fld.Tag.Get(tagName)
+ }
+
+ if name == ignore {
+ continue
+ }
+
+ if mode == ModeExplicit && len(name) == 0 {
+ continue
+ }
+
+ // check for omitempty
+ if idx = strings.LastIndexByte(name, ','); idx != -1 {
+ isOmitEmpty = name[idx+1:] == "omitempty"
+ name = name[:idx]
+ }
+
+ if len(name) == 0 {
+ name = fld.Name
+ }
+
+ cs.fields = append(cs.fields, cachedField{idx: i, name: name, isAnonymous: fld.Anonymous, isOmitEmpty: isOmitEmpty})
+ }
+
+ sort.Sort(cs.fields)
+ s.Set(typ, cs)
+
+ s.lock.Unlock()
+
+ return cs
+}
diff --git a/vendor/github.com/go-playground/form/decoder.go b/vendor/github.com/go-playground/form/decoder.go
new file mode 100644
index 0000000..cf858f0
--- /dev/null
+++ b/vendor/github.com/go-playground/form/decoder.go
@@ -0,0 +1,754 @@
+package form
+
+import (
+ "fmt"
+ "log"
+ "net/url"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+const (
+ errArraySize = "Array size of '%d' is larger than the maximum currently set on the decoder of '%d'. To increase this limit please see, SetMaxArraySize(size uint)"
+ errMissingStartBracket = "Invalid formatting for key '%s' missing '[' bracket"
+ errMissingEndBracket = "Invalid formatting for key '%s' missing ']' bracket"
+)
+
+type decoder struct {
+ d *Decoder
+ errs DecodeErrors
+ dm dataMap
+ values url.Values
+ maxKeyLen int
+ namespace []byte
+}
+
+func (d *decoder) setError(namespace []byte, err error) {
+ if d.errs == nil {
+ d.errs = make(DecodeErrors)
+ }
+
+ d.errs[string(namespace)] = err
+}
+
+func (d *decoder) findAlias(ns string) *recursiveData {
+
+ for i := 0; i < len(d.dm); i++ {
+
+ if d.dm[i].alias == ns {
+ return d.dm[i]
+ }
+ }
+
+ return nil
+}
+
+func (d *decoder) parseMapData() {
+
+ // already parsed
+ if len(d.dm) > 0 {
+ return
+ }
+
+ d.maxKeyLen = 0
+ d.dm = d.dm[0:0]
+
+ var i int
+ var idx int
+ var l int
+ var insideBracket bool
+ var rd *recursiveData
+ var isNum bool
+
+ for k := range d.values {
+
+ if len(k) > d.maxKeyLen {
+ d.maxKeyLen = len(k)
+ }
+
+ for i = 0; i < len(k); i++ {
+
+ switch k[i] {
+ case '[':
+ idx = i
+ insideBracket = true
+ isNum = true
+ case ']':
+
+ if !insideBracket {
+ log.Panicf(errMissingStartBracket, k)
+ }
+
+ if rd = d.findAlias(k[:idx]); rd == nil {
+
+ l = len(d.dm) + 1
+
+ if l > cap(d.dm) {
+ dm := make(dataMap, l)
+ copy(dm, d.dm)
+ rd = new(recursiveData)
+ dm[len(d.dm)] = rd
+ d.dm = dm
+ } else {
+ l = len(d.dm)
+ d.dm = d.dm[:l+1]
+ rd = d.dm[l]
+ rd.sliceLen = 0
+ rd.keys = rd.keys[0:0]
+ }
+
+ rd.alias = k[:idx]
+ }
+
+ // is map + key
+ ke := key{
+ ivalue: -1,
+ value: k[idx+1 : i],
+ searchValue: k[idx : i+1],
+ }
+
+ // if the key is a number it is most likely an array key; keep track of it just in case this is an array/slice.
+ if isNum {
+
+ // no need to check for error, it will always pass
+ // as we have done the checking to ensure
+ // the value is a number ahead of time.
+ ke.ivalue, _ = strconv.Atoi(ke.value)
+
+ if ke.ivalue > rd.sliceLen {
+ rd.sliceLen = ke.ivalue
+
+ }
+ }
+
+ rd.keys = append(rd.keys, ke)
+
+ insideBracket = false
+ default:
+ // check whether this is not a number: '0'-'9' are bytes 48-57.
+ if insideBracket && (k[i] > 57 || k[i] < 48) {
+ isNum = false
+ }
+ }
+ }
+
+ // if still inside bracket, that means no ending bracket was ever specified
+ if insideBracket {
+ log.Panicf(errMissingEndBracket, k)
+ }
+ }
+}
+
+func (d *decoder) traverseStruct(v reflect.Value, typ reflect.Type, namespace []byte) (set bool) {
+
+ l := len(namespace)
+ first := l == 0
+
+ // anonymous structs will still work for caching as the whole definition is stored
+ // including tags
+ s, ok := d.d.structCache.Get(typ)
+ if !ok {
+ s = d.d.structCache.parseStruct(d.d.mode, v, typ, d.d.tagName)
+ }
+
+ for _, f := range s.fields {
+
+ namespace = namespace[:l]
+
+ if f.isAnonymous {
+ if d.setFieldByType(v.Field(f.idx), namespace, 0) {
+ set = true
+ }
+ }
+
+ if first {
+ namespace = append(namespace, f.name...)
+ } else {
+ namespace = append(namespace, namespaceSeparator)
+ namespace = append(namespace, f.name...)
+ }
+
+ if d.setFieldByType(v.Field(f.idx), namespace, 0) {
+ set = true
+ }
+ }
+
+ return
+}
+
+func (d *decoder) setFieldByType(current reflect.Value, namespace []byte, idx int) (set bool) {
+
+ var err error
+ v, kind := ExtractType(current)
+
+ arr, ok := d.values[string(namespace)]
+
+ if d.d.customTypeFuncs != nil {
+
+ if ok {
+ if cf, ok := d.d.customTypeFuncs[v.Type()]; ok {
+ val, err := cf(arr[idx:])
+ if err != nil {
+ d.setError(namespace, err)
+ return
+ }
+
+ v.Set(reflect.ValueOf(val))
+ set = true
+ return
+ }
+ }
+ }
+ switch kind {
+ case reflect.Interface:
+ if !ok {
+ return
+ }
+ v.Set(reflect.ValueOf(arr[idx]))
+ set = true
+
+ case reflect.Ptr:
+
+ newVal := reflect.New(v.Type().Elem())
+ if set = d.setFieldByType(newVal.Elem(), namespace, idx); set {
+ v.Set(newVal)
+ }
+
+ case reflect.String:
+ if !ok {
+ return
+ }
+ v.SetString(arr[idx])
+ set = true
+
+ case reflect.Uint, reflect.Uint64:
+ if !ok || len(arr[idx]) == 0 {
+ return
+ }
+ var u64 uint64
+ if u64, err = strconv.ParseUint(arr[idx], 10, 64); err != nil {
+ d.setError(namespace, fmt.Errorf("Invalid Unsigned Integer Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
+ return
+ }
+ v.SetUint(u64)
+ set = true
+
+ case reflect.Uint8:
+ if !ok || len(arr[idx]) == 0 {
+ return
+ }
+ var u64 uint64
+ if u64, err = strconv.ParseUint(arr[idx], 10, 8); err != nil {
+ d.setError(namespace, fmt.Errorf("Invalid Unsigned Integer Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
+ return
+ }
+ v.SetUint(u64)
+ set = true
+
+ case reflect.Uint16:
+ if !ok || len(arr[idx]) == 0 {
+ return
+ }
+ var u64 uint64
+ if u64, err = strconv.ParseUint(arr[idx], 10, 16); err != nil {
+ d.setError(namespace, fmt.Errorf("Invalid Unsigned Integer Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
+ return
+ }
+ v.SetUint(u64)
+ set = true
+
+ case reflect.Uint32:
+ if !ok || len(arr[idx]) == 0 {
+ return
+ }
+ var u64 uint64
+ if u64, err = strconv.ParseUint(arr[idx], 10, 32); err != nil {
+ d.setError(namespace, fmt.Errorf("Invalid Unsigned Integer Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
+ return
+ }
+ v.SetUint(u64)
+ set = true
+
+ case reflect.Int, reflect.Int64:
+ if !ok || len(arr[idx]) == 0 {
+ return
+ }
+ var i64 int64
+ if i64, err = strconv.ParseInt(arr[idx], 10, 64); err != nil {
+ d.setError(namespace, fmt.Errorf("Invalid Integer Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
+ return
+ }
+ v.SetInt(i64)
+ set = true
+
+ case reflect.Int8:
+ if !ok || len(arr[idx]) == 0 {
+ return
+ }
+ var i64 int64
+ if i64, err = strconv.ParseInt(arr[idx], 10, 8); err != nil {
+ d.setError(namespace, fmt.Errorf("Invalid Integer Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
+ return
+ }
+ v.SetInt(i64)
+ set = true
+
+ case reflect.Int16:
+ if !ok || len(arr[idx]) == 0 {
+ return
+ }
+ var i64 int64
+ if i64, err = strconv.ParseInt(arr[idx], 10, 16); err != nil {
+ d.setError(namespace, fmt.Errorf("Invalid Integer Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
+ return
+ }
+ v.SetInt(i64)
+ set = true
+
+ case reflect.Int32:
+ if !ok || len(arr[idx]) == 0 {
+ return
+ }
+ var i64 int64
+ if i64, err = strconv.ParseInt(arr[idx], 10, 32); err != nil {
+ d.setError(namespace, fmt.Errorf("Invalid Integer Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
+ return
+ }
+ v.SetInt(i64)
+ set = true
+
+ case reflect.Float32:
+ if !ok || len(arr[idx]) == 0 {
+ return
+ }
+ var f float64
+ if f, err = strconv.ParseFloat(arr[idx], 32); err != nil {
+ d.setError(namespace, fmt.Errorf("Invalid Float Value '%s' Type '%v' Namespace '%s'", arr[0], v.Type(), string(namespace)))
+ return
+ }
+ v.SetFloat(f)
+ set = true
+
+ case reflect.Float64:
+ if !ok || len(arr[idx]) == 0 {
+ return
+ }
+ var f float64
+ if f, err = strconv.ParseFloat(arr[idx], 64); err != nil {
+ d.setError(namespace, fmt.Errorf("Invalid Float Value '%s' Type '%v' Namespace '%s'", arr[0], v.Type(), string(namespace)))
+ return
+ }
+ v.SetFloat(f)
+ set = true
+
+ case reflect.Bool:
+ if !ok {
+ return
+ }
+ var b bool
+ if b, err = parseBool(arr[idx]); err != nil {
+ d.setError(namespace, fmt.Errorf("Invalid Boolean Value '%s' Type '%v' Namespace '%s'", arr[idx], v.Type(), string(namespace)))
+ return
+ }
+ v.SetBool(b)
+ set = true
+
+ case reflect.Slice:
+ d.parseMapData()
+ // slice elements could be mixed, eg. numbered and non-numbered: Value[0]=[]string{"10"} and Value=[]string{"10","20"}
+
+ if ok && len(arr) > 0 {
+ var varr reflect.Value
+
+ var ol int
+ l := len(arr)
+
+ if v.IsNil() {
+ varr = reflect.MakeSlice(v.Type(), len(arr), len(arr))
+ } else {
+
+ ol = v.Len()
+ l += ol
+
+ if v.Cap() <= l {
+ varr = reflect.MakeSlice(v.Type(), l, l)
+ } else {
+ // preserve predefined capacity, possibly for reuse after decoding
+ varr = reflect.MakeSlice(v.Type(), l, v.Cap())
+ }
+ reflect.Copy(varr, v)
+ }
+
+ for i := ol; i < l; i++ {
+ newVal := reflect.New(v.Type().Elem()).Elem()
+
+ if d.setFieldByType(newVal, namespace, i-ol) {
+ set = true
+ varr.Index(i).Set(newVal)
+ }
+ }
+
+ v.Set(varr)
+ }
+
+		// maybe it's a numbered array i.e. Phone[0].Number
+ if rd := d.findAlias(string(namespace)); rd != nil {
+
+ var varr reflect.Value
+ var kv key
+
+ sl := rd.sliceLen + 1
+
+ // checking below for maxArraySize, but if array exists and already
+ // has sufficient capacity allocated then we do not check as the code
+ // obviously allows a capacity greater than the maxArraySize.
+
+ if v.IsNil() {
+
+ if sl > d.d.maxArraySize {
+ d.setError(namespace, fmt.Errorf(errArraySize, sl, d.d.maxArraySize))
+ return
+ }
+
+ varr = reflect.MakeSlice(v.Type(), sl, sl)
+
+ } else if v.Len() < sl {
+
+ if v.Cap() <= sl {
+
+ if sl > d.d.maxArraySize {
+ d.setError(namespace, fmt.Errorf(errArraySize, sl, d.d.maxArraySize))
+ return
+ }
+
+ varr = reflect.MakeSlice(v.Type(), sl, sl)
+ } else {
+ varr = reflect.MakeSlice(v.Type(), sl, v.Cap())
+ }
+
+ reflect.Copy(varr, v)
+
+ } else {
+ varr = v
+ }
+
+ for i := 0; i < len(rd.keys); i++ {
+
+ kv = rd.keys[i]
+ newVal := reflect.New(varr.Type().Elem()).Elem()
+
+ if kv.ivalue == -1 {
+ d.setError(namespace, fmt.Errorf("invalid slice index '%s'", kv.value))
+ continue
+ }
+
+ if d.setFieldByType(newVal, append(namespace, kv.searchValue...), 0) {
+ set = true
+ varr.Index(kv.ivalue).Set(newVal)
+ }
+ }
+
+ if !set {
+ return
+ }
+
+ v.Set(varr)
+ }
+
+ case reflect.Array:
+ d.parseMapData()
+
+		// array elements could be mixed eg. numbered and non-numbered keys: Value[0]=[]string{"10"} and Value=[]string{"10","20"}
+
+ if ok && len(arr) > 0 {
+ var varr reflect.Value
+ l := len(arr)
+ overCapacity := v.Len() < l
+ if overCapacity {
+ // more values than array capacity, ignore values over capacity as it's possible some would just want
+ // to grab the first x number of elements; in the future strict mode logic should return an error
+ fmt.Println("warning number of post form array values is larger than array capacity, ignoring overflow values")
+ }
+ varr = reflect.Indirect(reflect.New(reflect.ArrayOf(v.Len(), v.Type().Elem())))
+ reflect.Copy(varr, v)
+
+ if v.Len() < len(arr) {
+ l = v.Len()
+ }
+ for i := 0; i < l; i++ {
+ newVal := reflect.New(v.Type().Elem()).Elem()
+
+ if d.setFieldByType(newVal, namespace, i) {
+ set = true
+ varr.Index(i).Set(newVal)
+ }
+ }
+ v.Set(varr)
+ }
+
+		// maybe it's a numbered array i.e. Phone[0].Number
+ if rd := d.findAlias(string(namespace)); rd != nil {
+ var varr reflect.Value
+ var kv key
+
+ overCapacity := rd.sliceLen >= v.Len()
+ if overCapacity {
+ // more values than array capacity, ignore values over capacity as it's possible some would just want
+ // to grab the first x number of elements; in the future strict mode logic should return an error
+ fmt.Println("warning number of post form array values is larger than array capacity, ignoring overflow values")
+ }
+ varr = reflect.Indirect(reflect.New(reflect.ArrayOf(v.Len(), v.Type().Elem())))
+ reflect.Copy(varr, v)
+
+ for i := 0; i < len(rd.keys); i++ {
+ kv = rd.keys[i]
+ if kv.ivalue >= v.Len() {
+ continue
+ }
+ newVal := reflect.New(varr.Type().Elem()).Elem()
+
+ if kv.ivalue == -1 {
+ d.setError(namespace, fmt.Errorf("invalid array index '%s'", kv.value))
+ continue
+ }
+
+ if d.setFieldByType(newVal, append(namespace, kv.searchValue...), 0) {
+ set = true
+ varr.Index(kv.ivalue).Set(newVal)
+ }
+ }
+
+ if !set {
+ return
+ }
+ v.Set(varr)
+ }
+
+ case reflect.Map:
+ var rd *recursiveData
+
+ d.parseMapData()
+
+ // no natural map support so skip directly to dm lookup
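+		// (map values must arrive via the bracketed key syntax eg. "Map[key]", which parseMapData indexes)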
+ if rd = d.findAlias(string(namespace)); rd == nil {
+ return
+ }
+
+ var existing bool
+ var kv key
+ var mp reflect.Value
+ var mk reflect.Value
+
+ typ := v.Type()
+
+ if v.IsNil() {
+ mp = reflect.MakeMap(typ)
+ } else {
+ existing = true
+ mp = v
+ }
+
+ for i := 0; i < len(rd.keys); i++ {
+ newVal := reflect.New(typ.Elem()).Elem()
+ mk = reflect.New(typ.Key()).Elem()
+ kv = rd.keys[i]
+
+ if err := d.getMapKey(kv.value, mk, namespace); err != nil {
+ d.setError(namespace, err)
+ continue
+ }
+
+ if d.setFieldByType(newVal, append(namespace, kv.searchValue...), 0) {
+ set = true
+ mp.SetMapIndex(mk, newVal)
+ }
+ }
+
+ if !set || existing {
+ return
+ }
+
+ v.Set(mp)
+
+ case reflect.Struct:
+ typ := v.Type()
+
+ // if we get here then no custom time function declared so use RFC3339 by default
+ if typ == timeType {
+
+ if !ok || len(arr[idx]) == 0 {
+ return
+ }
+
+ t, err := time.Parse(time.RFC3339, arr[idx])
+ if err != nil {
+ d.setError(namespace, err)
+ }
+
+ v.Set(reflect.ValueOf(t))
+ set = true
+ return
+ }
+
+ d.parseMapData()
+
+		// we must be recursing infinitely... but that's ok, we caught it on the very first overrun.
+ if len(namespace) > d.maxKeyLen {
+ return
+ }
+
+ set = d.traverseStruct(v, typ, namespace)
+ }
+ return
+}
+
+func (d *decoder) getMapKey(key string, current reflect.Value, namespace []byte) (err error) {
+
+ v, kind := ExtractType(current)
+
+ if d.d.customTypeFuncs != nil {
+ if cf, ok := d.d.customTypeFuncs[v.Type()]; ok {
+
+ val, er := cf([]string{key})
+ if er != nil {
+ err = er
+ return
+ }
+
+ v.Set(reflect.ValueOf(val))
+ return
+ }
+ }
+
+ switch kind {
+ case reflect.Interface:
+		// If the interface had been set on the struct before decoding,
+		// say to a struct value, we would not get here; kind would be struct instead.
+ v.Set(reflect.ValueOf(key))
+ return
+ case reflect.Ptr:
+ newVal := reflect.New(v.Type().Elem())
+ if err = d.getMapKey(key, newVal.Elem(), namespace); err == nil {
+ v.Set(newVal)
+ }
+
+ case reflect.String:
+ v.SetString(key)
+
+ case reflect.Uint, reflect.Uint64:
+
+ u64, e := strconv.ParseUint(key, 10, 64)
+ if e != nil {
+ err = fmt.Errorf("Invalid Unsigned Integer Value '%s' Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
+ return
+ }
+
+ v.SetUint(u64)
+
+ case reflect.Uint8:
+
+ u64, e := strconv.ParseUint(key, 10, 8)
+ if e != nil {
+ err = fmt.Errorf("Invalid Unsigned Integer Value '%s' Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
+ return
+ }
+
+ v.SetUint(u64)
+
+ case reflect.Uint16:
+
+ u64, e := strconv.ParseUint(key, 10, 16)
+ if e != nil {
+ err = fmt.Errorf("Invalid Unsigned Integer Value '%s' Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
+ return
+ }
+
+ v.SetUint(u64)
+
+ case reflect.Uint32:
+
+ u64, e := strconv.ParseUint(key, 10, 32)
+ if e != nil {
+ err = fmt.Errorf("Invalid Unsigned Integer Value '%s' Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
+ return
+ }
+
+ v.SetUint(u64)
+
+ case reflect.Int, reflect.Int64:
+
+ i64, e := strconv.ParseInt(key, 10, 64)
+ if e != nil {
+ err = fmt.Errorf("Invalid Integer Value '%s' Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
+ return
+ }
+
+ v.SetInt(i64)
+
+ case reflect.Int8:
+
+ i64, e := strconv.ParseInt(key, 10, 8)
+ if e != nil {
+ err = fmt.Errorf("Invalid Integer Value '%s' Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
+ return
+ }
+
+ v.SetInt(i64)
+
+ case reflect.Int16:
+
+ i64, e := strconv.ParseInt(key, 10, 16)
+ if e != nil {
+ err = fmt.Errorf("Invalid Integer Value '%s' Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
+ return
+ }
+
+ v.SetInt(i64)
+
+ case reflect.Int32:
+
+ i64, e := strconv.ParseInt(key, 10, 32)
+ if e != nil {
+ err = fmt.Errorf("Invalid Integer Value '%s' Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
+ return
+ }
+
+ v.SetInt(i64)
+
+ case reflect.Float32:
+
+ f, e := strconv.ParseFloat(key, 32)
+ if e != nil {
+ err = fmt.Errorf("Invalid Float Value '%s' Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
+ return
+ }
+
+ v.SetFloat(f)
+
+ case reflect.Float64:
+
+ f, e := strconv.ParseFloat(key, 64)
+ if e != nil {
+ err = fmt.Errorf("Invalid Float Value '%s' Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
+ return
+ }
+
+ v.SetFloat(f)
+
+ case reflect.Bool:
+
+ b, e := parseBool(key)
+ if e != nil {
+ err = fmt.Errorf("Invalid Boolean Value '%s' Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
+ return
+ }
+
+ v.SetBool(b)
+
+ default:
+ err = fmt.Errorf("Unsupported Map Key '%s', Type '%v' Namespace '%s'", key, v.Type(), string(namespace))
+ }
+
+ return
+}
diff --git a/vendor/github.com/go-playground/form/doc.go b/vendor/github.com/go-playground/form/doc.go
new file mode 100644
index 0000000..b2201eb
--- /dev/null
+++ b/vendor/github.com/go-playground/form/doc.go
@@ -0,0 +1,275 @@
+/*
+Package form decodes url.Values into Go value(s) and encodes Go value(s) into url.Values.
+
+
+It has the following features:
+
+    - Primitive types cause zero allocations.
+ - Supports map of almost all types.
+ - Supports both Numbered and Normal arrays eg. "Array[0]" and just "Array"
+ with multiple values passed.
+ - Slice honours the specified index. eg. if "Slice[2]" is the only Slice
+ value passed down, it will be put at index 2; if slice isn't big enough
+ it will be expanded.
+ - Array honours the specified index. eg. if "Array[2]" is the only Array
+ value passed down, it will be put at index 2; if array isn't big enough
+ a warning will be printed and value ignored.
+ - Only creates objects as necessary eg. if no `array` or `map` values are
+ passed down, the `array` and `map` are left as their default values in
+ the struct.
+ - Allows for Custom Type registration.
+ - Handles time.Time using RFC3339 time format by default,
+ but can easily be changed by registering a Custom Type, see below.
+ - Handles Encoding & Decoding of almost all Go types eg. can Decode into
+ struct, array, map, int... and Encode a struct, array, map, int...
+
+Common Questions
+
+Questions
+
+ Does it support encoding.TextUnmarshaler?
+    No, because TextUnmarshaler only accepts []byte, but posted values can have
+    multiple values, so it is not suitable.
+
+ Mixing array/slice with array[idx]/slice[idx], in which order are they parsed?
+ array/slice then array[idx]/slice[idx]
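+
+    eg. for a []string field, url.Values{"Slice": {"a", "b"}, "Slice[0]": {"c"}}
+    decodes to []string{"c", "b"}: the plain values are applied first, then the
+    indexed value overwrites index 0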
+
+Supported Types
+
+out of the box supported types
+
+ - string
+ - bool
+ - int, int8, int16, int32, int64
+ - uint, uint8, uint16, uint32, uint64
+ - float32, float64
+ - struct and anonymous struct
+ - interface{}
+    - `time.Time` - by default using RFC3339
+ - a `pointer` to one of the above types
+ - slice, array
+ - map
+ - `custom types` can override any of the above types
+    - many other types may be supported inherently (eg. bson.ObjectId is
+      type ObjectId string, which will get populated by the string type)
+
+    **NOTE**: map, struct and slice nesting is supported ad infinitum.
+
+Usage
+
+symbols
+
+ - Use symbol `.` for separating fields/structs. (eg. `structfield.field`)
+ - Use `[index or key]` for access to index of a slice/array or key for map.
+ (eg. `arrayfield[0]`, `mapfield[keyvalue]`)
+
+html
+
+    <form method="POST">
+      <input type="text" name="Name" value="joeybloggs"/>
+      <input type="text" name="Age" value="3"/>
+      <input type="text" name="Gender" value="Male"/>
+      <input type="text" name="Address[0].Name" value="26 Here Blvd."/>
+      <input type="text" name="Address[0].Phone" value="9(999)999-9999"/>
+      <input type="text" name="Address[1].Name" value="26 There Blvd."/>
+      <input type="text" name="Address[1].Phone" value="1(111)111-1111"/>
+      <input type="checkbox" name="active" value="true"/>
+      <input type="text" name="MapExample[key]" value="value"/>
+      <input type="text" name="NestedMap[key][key]" value="value"/>
+      <input type="text" name="NestedArray[0][0]" value="value"/>
+      <input type="submit"/>
+    </form>
+
+Example
+
+example decoding the above HTML
+
+ package main
+
+ import (
+ "fmt"
+ "log"
+ "net/url"
+
+ "github.com/go-playground/form"
+ )
+
+ // Address contains address information
+ type Address struct {
+ Name string
+ Phone string
+ }
+
+ // User contains user information
+ type User struct {
+ Name string
+ Age uint8
+ Gender string
+ Address []Address
+ Active bool `form:"active"`
+ MapExample map[string]string
+ NestedMap map[string]map[string]string
+ NestedArray [][]string
+ }
+
+ // use a single instance of Decoder, it caches struct info
+ var decoder *form.Decoder
+
+ func main() {
+ decoder = form.NewDecoder()
+
+ // this simulates the results of http.Request's ParseForm() function
+ values := parseForm()
+
+ var user User
+
+ // must pass a pointer
+ err := decoder.Decode(&user, values)
+ if err != nil {
+ log.Panic(err)
+ }
+
+ fmt.Printf("%#v\n", user)
+ }
+
+ // this simulates the results of http.Request's ParseForm() function
+ func parseForm() url.Values {
+ return url.Values{
+ "Name": []string{"joeybloggs"},
+ "Age": []string{"3"},
+ "Gender": []string{"Male"},
+ "Address[0].Name": []string{"26 Here Blvd."},
+ "Address[0].Phone": []string{"9(999)999-9999"},
+ "Address[1].Name": []string{"26 There Blvd."},
+ "Address[1].Phone": []string{"1(111)111-1111"},
+ "active": []string{"true"},
+ "MapExample[key]": []string{"value"},
+ "NestedMap[key][key]": []string{"value"},
+ "NestedArray[0][0]": []string{"value"},
+ }
+ }
+
+example encoding
+
+ package main
+
+ import (
+ "fmt"
+ "log"
+
+ "github.com/go-playground/form"
+ )
+
+ // Address contains address information
+ type Address struct {
+ Name string
+ Phone string
+ }
+
+ // User contains user information
+ type User struct {
+ Name string
+ Age uint8
+ Gender string
+ Address []Address
+ Active bool `form:"active"`
+ MapExample map[string]string
+ NestedMap map[string]map[string]string
+ NestedArray [][]string
+ }
+
+ // use a single instance of Encoder, it caches struct info
+ var encoder *form.Encoder
+
+ func main() {
+ encoder = form.NewEncoder()
+
+ user := User{
+ Name: "joeybloggs",
+ Age: 3,
+ Gender: "Male",
+ Address: []Address{
+ {Name: "26 Here Blvd.", Phone: "9(999)999-9999"},
+ {Name: "26 There Blvd.", Phone: "1(111)111-1111"},
+ },
+ Active: true,
+ MapExample: map[string]string{"key": "value"},
+ NestedMap: map[string]map[string]string{"key": {"key": "value"}},
+ NestedArray: [][]string{{"value"}},
+ }
+
+ // must pass a pointer
+ values, err := encoder.Encode(&user)
+ if err != nil {
+ log.Panic(err)
+ }
+
+ fmt.Printf("%#v\n", values)
+ }
+
+
+Registering Custom Types
+
+Decoder
+
+ decoder.RegisterCustomTypeFunc(func(vals []string) (interface{}, error) {
+ return time.Parse("2006-01-02", vals[0])
+ }, time.Time{})
+
+ ADDITIONAL: if a struct type is registered, the function will only be called
+ if a url.Value exists for the struct and not just the struct fields
+ eg. url.Values{"User":"Name%3Djoeybloggs"} will call the custom type function
+ with 'User' as the type, however url.Values{"User.Name":"joeybloggs"} will not.
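+
+    a minimal sketch of a custom decode function for a registered struct type;
+    User here is a hypothetical struct with a Name field:
+
+        decoder.RegisterCustomTypeFunc(func(vals []string) (interface{}, error) {
+            // vals[0] holds the nested form data for the struct eg. "Name=joeybloggs"
+            v, err := url.ParseQuery(vals[0])
+            if err != nil {
+                return nil, err
+            }
+            return User{Name: v.Get("Name")}, nil
+        }, User{})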
+
+Encoder
+
+ encoder.RegisterCustomTypeFunc(func(x interface{}) ([]string, error) {
+ return []string{x.(time.Time).Format("2006-01-02")}, nil
+ }, time.Time{})
+
+
+Ignoring Fields
+
+you can tell form to ignore fields using `-` in the tag
+
+ type MyStruct struct {
+ Field string `form:"-"`
+ }
+
+Omitempty
+
+you can tell form to omit empty fields using `,omitempty` or `FieldName,omitempty` in the tag
+
+ type MyStruct struct {
+ Field string `form:",omitempty"`
+ Field2 string `form:"CustomFieldName,omitempty"`
+ }
+
+
+Notes
+
+To maximize compatibility with other systems the Encoder attempts
+to avoid using array indexes in url.Values if at all possible.
+
+ eg.
+
+ // A struct field of
+ Field []string{"1", "2", "3"}
+
+    // will be output in url.Values as
+ "Field": []string{"1", "2", "3"}
+
+ and not
+ "Field[0]": []string{"1"}
+ "Field[1]": []string{"2"}
+ "Field[2]": []string{"3"}
+
+    // however there are times when it is unavoidable, like with pointers
+    s := "1"
+    Field := []*string{nil, nil, &s}
+
+    // to skip the nil values at index 0 and 1, the index must be used
+    "Field[2]": []string{"1"}
+
+*/
+package form
diff --git a/vendor/github.com/go-playground/form/encoder.go b/vendor/github.com/go-playground/form/encoder.go
new file mode 100644
index 0000000..9f26866
--- /dev/null
+++ b/vendor/github.com/go-playground/form/encoder.go
@@ -0,0 +1,260 @@
+package form
+
+import (
+ "fmt"
+ "net/url"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+type encoder struct {
+ e *Encoder
+ errs EncodeErrors
+ values url.Values
+ namespace []byte
+}
+
+func (e *encoder) setError(namespace []byte, err error) {
+ if e.errs == nil {
+ e.errs = make(EncodeErrors)
+ }
+
+ e.errs[string(namespace)] = err
+}
+
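+// setVal appends the given values to any values already stored under namespace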
+func (e *encoder) setVal(namespace []byte, idx int, vals ...string) {
+
+ arr, ok := e.values[string(namespace)]
+ if ok {
+ arr = append(arr, vals...)
+ } else {
+ arr = vals
+ }
+
+ e.values[string(namespace)] = arr
+}
+
+func (e *encoder) traverseStruct(v reflect.Value, namespace []byte, idx int) {
+
+ typ := v.Type()
+ l := len(namespace)
+ first := l == 0
+
+ // anonymous structs will still work for caching as the whole definition is stored
+ // including tags
+ s, ok := e.e.structCache.Get(typ)
+ if !ok {
+ s = e.e.structCache.parseStruct(e.e.mode, v, typ, e.e.tagName)
+ }
+
+ for _, f := range s.fields {
+ namespace = namespace[:l]
+
+ if f.isAnonymous && e.e.embedAnonymous {
+ e.setFieldByType(v.Field(f.idx), namespace, idx, f.isOmitEmpty)
+ continue
+ }
+
+ if first {
+ namespace = append(namespace, f.name...)
+ } else {
+ namespace = append(namespace, namespaceSeparator)
+ namespace = append(namespace, f.name...)
+ }
+
+ e.setFieldByType(v.Field(f.idx), namespace, idx, f.isOmitEmpty)
+ }
+}
+
+func (e *encoder) setFieldByType(current reflect.Value, namespace []byte, idx int, isOmitEmpty bool) {
+
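+	// idx semantics: -1 = no index to apply, >= 0 = element index to append
+	// to the namespace, -2 = the index has already been appended by the caller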
+ if idx > -1 && current.Kind() == reflect.Ptr {
+ namespace = append(namespace, '[')
+ namespace = strconv.AppendInt(namespace, int64(idx), 10)
+ namespace = append(namespace, ']')
+ idx = -2
+ }
+
+ if isOmitEmpty && !hasValue(current) {
+ return
+ }
+ v, kind := ExtractType(current)
+
+ if e.e.customTypeFuncs != nil {
+
+ if cf, ok := e.e.customTypeFuncs[v.Type()]; ok {
+
+ arr, err := cf(v.Interface())
+ if err != nil {
+ e.setError(namespace, err)
+ return
+ }
+
+ if idx > -1 {
+ namespace = append(namespace, '[')
+ namespace = strconv.AppendInt(namespace, int64(idx), 10)
+ namespace = append(namespace, ']')
+ }
+
+ e.setVal(namespace, idx, arr...)
+ return
+ }
+ }
+
+ switch kind {
+ case reflect.Ptr, reflect.Interface, reflect.Invalid:
+ return
+
+ case reflect.String:
+
+ e.setVal(namespace, idx, v.String())
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+
+ e.setVal(namespace, idx, strconv.FormatUint(v.Uint(), 10))
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+
+ e.setVal(namespace, idx, strconv.FormatInt(v.Int(), 10))
+
+ case reflect.Float32:
+
+ e.setVal(namespace, idx, strconv.FormatFloat(v.Float(), 'f', -1, 32))
+
+ case reflect.Float64:
+
+ e.setVal(namespace, idx, strconv.FormatFloat(v.Float(), 'f', -1, 64))
+
+ case reflect.Bool:
+
+ e.setVal(namespace, idx, strconv.FormatBool(v.Bool()))
+
+ case reflect.Slice, reflect.Array:
+
+ if idx == -1 {
+
+ for i := 0; i < v.Len(); i++ {
+ e.setFieldByType(v.Index(i), namespace, i, false)
+ }
+
+ return
+ }
+
+ if idx > -1 {
+ namespace = append(namespace, '[')
+ namespace = strconv.AppendInt(namespace, int64(idx), 10)
+ namespace = append(namespace, ']')
+ }
+
+ namespace = append(namespace, '[')
+ l := len(namespace)
+
+ for i := 0; i < v.Len(); i++ {
+ namespace = namespace[:l]
+ namespace = strconv.AppendInt(namespace, int64(i), 10)
+ namespace = append(namespace, ']')
+ e.setFieldByType(v.Index(i), namespace, -2, false)
+ }
+
+ case reflect.Map:
+
+ if idx > -1 {
+ namespace = append(namespace, '[')
+ namespace = strconv.AppendInt(namespace, int64(idx), 10)
+ namespace = append(namespace, ']')
+ }
+
+ var valid bool
+ var s string
+ l := len(namespace)
+
+ for _, key := range v.MapKeys() {
+
+ namespace = namespace[:l]
+
+ if s, valid = e.getMapKey(key, namespace); !valid {
+ continue
+ }
+
+ namespace = append(namespace, '[')
+ namespace = append(namespace, s...)
+ namespace = append(namespace, ']')
+
+ e.setFieldByType(current.MapIndex(key), namespace, -2, false)
+ }
+
+ case reflect.Struct:
+
+ // if we get here then no custom time function declared so use RFC3339 by default
+ if v.Type() == timeType {
+
+ if idx > -1 {
+ namespace = append(namespace, '[')
+ namespace = strconv.AppendInt(namespace, int64(idx), 10)
+ namespace = append(namespace, ']')
+ }
+
+ e.setVal(namespace, idx, v.Interface().(time.Time).Format(time.RFC3339))
+ return
+ }
+
+ if idx == -1 {
+ e.traverseStruct(v, namespace, idx)
+ return
+ }
+
+ if idx > -1 {
+ namespace = append(namespace, '[')
+ namespace = strconv.AppendInt(namespace, int64(idx), 10)
+ namespace = append(namespace, ']')
+ }
+
+ e.traverseStruct(v, namespace, -2)
+ }
+}
+
+func (e *encoder) getMapKey(key reflect.Value, namespace []byte) (string, bool) {
+
+ v, kind := ExtractType(key)
+
+ if e.e.customTypeFuncs != nil {
+
+ if cf, ok := e.e.customTypeFuncs[v.Type()]; ok {
+ arr, err := cf(v.Interface())
+ if err != nil {
+ e.setError(namespace, err)
+ return "", false
+ }
+
+ return arr[0], true
+ }
+ }
+
+ switch kind {
+ case reflect.Interface, reflect.Ptr:
+ return "", false
+
+ case reflect.String:
+ return v.String(), true
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return strconv.FormatUint(v.Uint(), 10), true
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return strconv.FormatInt(v.Int(), 10), true
+
+ case reflect.Float32:
+ return strconv.FormatFloat(v.Float(), 'f', -1, 32), true
+
+ case reflect.Float64:
+ return strconv.FormatFloat(v.Float(), 'f', -1, 64), true
+
+ case reflect.Bool:
+ return strconv.FormatBool(v.Bool()), true
+
+ default:
+ e.setError(namespace, fmt.Errorf("Unsupported Map Key '%v' Namespace '%s'", v.String(), namespace))
+ return "", false
+ }
+}
diff --git a/vendor/github.com/go-playground/form/form.go b/vendor/github.com/go-playground/form/form.go
new file mode 100644
index 0000000..0ce5917
--- /dev/null
+++ b/vendor/github.com/go-playground/form/form.go
@@ -0,0 +1,50 @@
+package form
+
+import (
+ "reflect"
+ "time"
+)
+
+const (
+ blank = ""
+ namespaceSeparator = '.'
+ ignore = "-"
+ fieldNS = "Field Namespace:"
+ errorText = " ERROR:"
+)
+
+var (
+ timeType = reflect.TypeOf(time.Time{})
+)
+
+// Mode specifies which mode the form decoder is to run
+type Mode uint8
+
+const (
+
+ // ModeImplicit tries to parse values for all
+ // fields that do not have an ignore '-' tag
+ ModeImplicit Mode = iota
+
+ // ModeExplicit only parses values for field with a field tag
+ // and that tag is not the ignore '-' tag
+ ModeExplicit
+)
+
+// AnonymousMode specifies how data should be rolled up
+// or separated from anonymous structs
+type AnonymousMode uint8
+
+const (
+ // AnonymousEmbed embeds anonymous data when encoding
+ // eg. type A struct { Field string }
+ // type B struct { A, Field string }
+ // encode results: url.Values{"Field":[]string{"B FieldVal", "A FieldVal"}}
+ AnonymousEmbed AnonymousMode = iota
+
+ // AnonymousSeparate does not embed anonymous data when encoding
+ // eg. type A struct { Field string }
+ // type B struct { A, Field string }
+ // encode results: url.Values{"Field":[]string{"B FieldVal"}, "A.Field":[]string{"A FieldVal"}}
+ AnonymousSeparate
+)
diff --git a/vendor/github.com/go-playground/form/form_decoder.go b/vendor/github.com/go-playground/form/form_decoder.go
new file mode 100644
index 0000000..fe8b36f
--- /dev/null
+++ b/vendor/github.com/go-playground/form/form_decoder.go
@@ -0,0 +1,174 @@
+package form
+
+import (
+ "bytes"
+ "net/url"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// DecodeCustomTypeFunc allows for registering/overriding types to be parsed.
+type DecodeCustomTypeFunc func([]string) (interface{}, error)
+
+// DecodeErrors is a map of errors encountered during form decoding
+type DecodeErrors map[string]error
+
+func (d DecodeErrors) Error() string {
+ buff := bytes.NewBufferString(blank)
+
+ for k, err := range d {
+ buff.WriteString(fieldNS)
+ buff.WriteString(k)
+ buff.WriteString(errorText)
+ buff.WriteString(err.Error())
+ buff.WriteString("\n")
+ }
+
+ return strings.TrimSpace(buff.String())
+}
+
+// An InvalidDecoderError describes an invalid argument passed to Decode.
+// (The argument passed to Decode must be a non-nil pointer.)
+type InvalidDecoderError struct {
+ Type reflect.Type
+}
+
+func (e *InvalidDecoderError) Error() string {
+
+ if e.Type == nil {
+ return "form: Decode(nil)"
+ }
+
+ if e.Type.Kind() != reflect.Ptr {
+ return "form: Decode(non-pointer " + e.Type.String() + ")"
+ }
+
+ return "form: Decode(nil " + e.Type.String() + ")"
+}
+
+type key struct {
+ ivalue int
+ value string
+ searchValue string
+}
+
+type recursiveData struct {
+ alias string
+ sliceLen int
+ keys []key
+}
+
+type dataMap []*recursiveData
+
+// Decoder is the main decode instance
+type Decoder struct {
+ tagName string
+ mode Mode
+ structCache *structCacheMap
+ customTypeFuncs map[reflect.Type]DecodeCustomTypeFunc
+ maxArraySize int
+ dataPool *sync.Pool
+}
+
+// NewDecoder creates a new decoder instance with sane defaults
+func NewDecoder() *Decoder {
+
+ d := &Decoder{
+ tagName: "form",
+ mode: ModeImplicit,
+ structCache: newStructCacheMap(),
+ maxArraySize: 10000,
+ }
+
+ d.dataPool = &sync.Pool{New: func() interface{} {
+ return &decoder{
+ d: d,
+ namespace: make([]byte, 0, 64),
+ }
+ }}
+
+ return d
+}
+
+// SetTagName sets the given tag name to be used by the decoder.
+// Default is "form"
+func (d *Decoder) SetTagName(tagName string) {
+ d.tagName = tagName
+}
+
+// SetMode sets the mode the decoder should run
+// Default is ModeImplicit
+func (d *Decoder) SetMode(mode Mode) {
+ d.mode = mode
+}
+
+// SetMaxArraySize sets maximum array size that can be created.
+// This limit is for the array indexing this library supports to
+// avoid potential DoS or man-in-the-middle attacks using an unusually
+// high number.
+// DEFAULT: 10000
+func (d *Decoder) SetMaxArraySize(size uint) {
+ d.maxArraySize = int(size)
+}
+
+// RegisterTagNameFunc registers a custom tag name parser function
+// NOTE: This method is not thread-safe; it is intended that these all be registered prior to any parsing
+//
+// ADDITIONAL: once a custom function has been registered the default, or custom set, tag name is ignored
+// and relies 100% on the function for the name data. The return value WILL BE CACHED and so return value
+// must be consistent.
+func (d *Decoder) RegisterTagNameFunc(fn TagNameFunc) {
+ d.structCache.tagFn = fn
+}
+
+// RegisterCustomTypeFunc registers a CustomTypeFunc against a number of types.
+// NOTE: This method is not thread-safe; it is intended that these all be registered prior to any parsing
+//
+// ADDITIONAL: if a struct type is registered, the function will only be called if a url.Value exists for
+// the struct and not just the struct fields eg. url.Values{"User":"Name%3Djoeybloggs"} will call the
+// custom type function with `User` as the type, however url.Values{"User.Name":"joeybloggs"} will not.
+func (d *Decoder) RegisterCustomTypeFunc(fn DecodeCustomTypeFunc, types ...interface{}) {
+
+ if d.customTypeFuncs == nil {
+ d.customTypeFuncs = map[reflect.Type]DecodeCustomTypeFunc{}
+ }
+
+ for _, t := range types {
+ d.customTypeFuncs[reflect.TypeOf(t)] = fn
+ }
+}
+
+// Decode parses the given values and sets the corresponding struct and/or type values
+//
+// Decode returns an InvalidDecoderError if interface passed is invalid.
+func (d *Decoder) Decode(v interface{}, values url.Values) (err error) {
+
+ val := reflect.ValueOf(v)
+
+ if val.Kind() != reflect.Ptr || val.IsNil() {
+ return &InvalidDecoderError{reflect.TypeOf(v)}
+ }
+
+ dec := d.dataPool.Get().(*decoder)
+ dec.values = values
+ dec.dm = dec.dm[0:0]
+
+ val = val.Elem()
+ typ := val.Type()
+
+ if val.Kind() == reflect.Struct && typ != timeType {
+ dec.traverseStruct(val, typ, dec.namespace[0:0])
+ } else {
+ dec.setFieldByType(val, dec.namespace[0:0], 0)
+ }
+
+ if len(dec.errs) > 0 {
+ err = dec.errs
+ dec.errs = nil
+ }
+
+ d.dataPool.Put(dec)
+
+ return
+}
diff --git a/vendor/github.com/go-playground/form/form_encoder.go b/vendor/github.com/go-playground/form/form_encoder.go
new file mode 100644
index 0000000..ad3cc10
--- /dev/null
+++ b/vendor/github.com/go-playground/form/form_encoder.go
@@ -0,0 +1,144 @@
+package form
+
+import (
+ "bytes"
+ "net/url"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// EncodeCustomTypeFunc allows for registering/overriding types to be parsed.
+type EncodeCustomTypeFunc func(x interface{}) ([]string, error)
+
+// EncodeErrors is a map of errors encountered during form encoding
+type EncodeErrors map[string]error
+
+func (e EncodeErrors) Error() string {
+ buff := bytes.NewBufferString(blank)
+
+ for k, err := range e {
+ buff.WriteString(fieldNS)
+ buff.WriteString(k)
+ buff.WriteString(errorText)
+ buff.WriteString(err.Error())
+ buff.WriteString("\n")
+ }
+
+ return strings.TrimSpace(buff.String())
+}
+
+// An InvalidEncodeError describes an invalid argument passed to Encode.
+type InvalidEncodeError struct {
+ Type reflect.Type
+}
+
+func (e *InvalidEncodeError) Error() string {
+
+ if e.Type == nil {
+ return "form: Encode(nil)"
+ }
+
+ return "form: Encode(nil " + e.Type.String() + ")"
+}
+
+// Encoder is the main encode instance
+type Encoder struct {
+ tagName string
+ structCache *structCacheMap
+ customTypeFuncs map[reflect.Type]EncodeCustomTypeFunc
+ dataPool *sync.Pool
+ mode Mode
+ embedAnonymous bool
+}
+
+// NewEncoder creates a new encoder instance with sane defaults
+func NewEncoder() *Encoder {
+
+ e := &Encoder{
+ tagName: "form",
+ mode: ModeImplicit,
+ structCache: newStructCacheMap(),
+ embedAnonymous: true,
+ }
+
+ e.dataPool = &sync.Pool{New: func() interface{} {
+ return &encoder{
+ e: e,
+ namespace: make([]byte, 0, 64),
+ }
+ }}
+
+ return e
+}
+
+// SetTagName sets the given tag name to be used by the encoder.
+// Default is "form"
+func (e *Encoder) SetTagName(tagName string) {
+ e.tagName = tagName
+}
+
+// SetMode sets the mode the encoder should run
+// Default is ModeImplicit
+func (e *Encoder) SetMode(mode Mode) {
+ e.mode = mode
+}
+
+// SetAnonymousMode sets the mode the encoder should run
+// Default is AnonymousEmbed
+func (e *Encoder) SetAnonymousMode(mode AnonymousMode) {
+ e.embedAnonymous = mode == AnonymousEmbed
+}
+
+// RegisterTagNameFunc registers a custom tag name parser function
+// NOTE: This method is not thread-safe; it is intended that these all be registered prior to any parsing
+//
+// ADDITIONAL: once a custom function has been registered the default, or custom set, tag name is ignored
+// and relies 100% on the function for the name data. The return value WILL BE CACHED and so return value
+// must be consistent.
+func (e *Encoder) RegisterTagNameFunc(fn TagNameFunc) {
+ e.structCache.tagFn = fn
+}
+
+// RegisterCustomTypeFunc registers a CustomTypeFunc against a number of types
+// NOTE: this method is not thread-safe; it is intended that these all be registered prior to any parsing
+func (e *Encoder) RegisterCustomTypeFunc(fn EncodeCustomTypeFunc, types ...interface{}) {
+
+ if e.customTypeFuncs == nil {
+ e.customTypeFuncs = map[reflect.Type]EncodeCustomTypeFunc{}
+ }
+
+ for _, t := range types {
+ e.customTypeFuncs[reflect.TypeOf(t)] = fn
+ }
+}
+
+// Encode encodes the given value into url.Values
+func (e *Encoder) Encode(v interface{}) (values url.Values, err error) {
+
+ val, kind := ExtractType(reflect.ValueOf(v))
+
+ if kind == reflect.Ptr || kind == reflect.Interface || kind == reflect.Invalid {
+ return nil, &InvalidEncodeError{reflect.TypeOf(v)}
+ }
+
+ enc := e.dataPool.Get().(*encoder)
+ enc.values = make(url.Values)
+
+ if kind == reflect.Struct && val.Type() != timeType {
+ enc.traverseStruct(val, enc.namespace[0:0], -1)
+ } else {
+ enc.setFieldByType(val, enc.namespace[0:0], -1, false)
+ }
+
+ if len(enc.errs) > 0 {
+ err = enc.errs
+ enc.errs = nil
+ }
+
+ values = enc.values
+
+ e.dataPool.Put(enc)
+
+ return
+}
diff --git a/vendor/github.com/go-playground/form/logo.jpg b/vendor/github.com/go-playground/form/logo.jpg
new file mode 100644
index 0000000..2ef34f8
Binary files /dev/null and b/vendor/github.com/go-playground/form/logo.jpg differ
diff --git a/vendor/github.com/go-playground/form/util.go b/vendor/github.com/go-playground/form/util.go
new file mode 100644
index 0000000..13db877
--- /dev/null
+++ b/vendor/github.com/go-playground/form/util.go
@@ -0,0 +1,56 @@
+package form
+
+import (
+ "reflect"
+ "strconv"
+)
+
+// ExtractType gets the actual underlying type of field value.
+// it is exposed for use within your Custom Functions
+func ExtractType(current reflect.Value) (reflect.Value, reflect.Kind) {
+
+ switch current.Kind() {
+ case reflect.Ptr:
+
+ if current.IsNil() {
+ return current, reflect.Ptr
+ }
+
+ return ExtractType(current.Elem())
+
+ case reflect.Interface:
+
+ if current.IsNil() {
+ return current, reflect.Interface
+ }
+
+ return ExtractType(current.Elem())
+
+ default:
+ return current, current.Kind()
+ }
+}
+
+func parseBool(str string) (bool, error) {
+
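+	// accepts a superset of strconv.ParseBool values to cover common HTML
+	// form inputs eg. "on"/"off" from checkboxes; note the empty string is
+	// treated as false rather than an error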
+ switch str {
+ case "1", "t", "T", "true", "TRUE", "True", "on", "yes", "ok":
+ return true, nil
+ case "", "0", "f", "F", "false", "FALSE", "False", "off", "no":
+ return false, nil
+ }
+
+	// strconv.NumError mimicking exactly the strconv.ParseBool(..) error and type
+ // to ensure compatibility with std library and beyond.
+ return false, &strconv.NumError{Func: "ParseBool", Num: str, Err: strconv.ErrSyntax}
+}
+
+// hasValue determines whether a reflect.Value holds a non-default (non-zero) value
+func hasValue(field reflect.Value) bool {
+ switch field.Kind() {
+ case reflect.Slice, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func:
+ return !field.IsNil()
+ default:
+ return field.IsValid() && field.Interface() != reflect.Zero(field.Type()).Interface()
+ }
+}
diff --git a/vendor/github.com/go-playground/locales/.gitignore b/vendor/github.com/go-playground/locales/.gitignore
new file mode 100644
index 0000000..daf913b
--- /dev/null
+++ b/vendor/github.com/go-playground/locales/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/go-playground/locales/.travis.yml b/vendor/github.com/go-playground/locales/.travis.yml
new file mode 100644
index 0000000..d50237a
--- /dev/null
+++ b/vendor/github.com/go-playground/locales/.travis.yml
@@ -0,0 +1,26 @@
+language: go
+go:
+ - 1.13.1
+ - tip
+matrix:
+ allow_failures:
+ - go: tip
+
+notifications:
+ email:
+ recipients: dean.karn@gmail.com
+ on_success: change
+ on_failure: always
+
+before_install:
+ - go install github.com/mattn/goveralls
+
+# Only clone the most recent commit.
+git:
+ depth: 1
+
+script:
+ - go test -v -race -covermode=atomic -coverprofile=coverage.coverprofile ./...
+
+after_success: |
+ goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN
\ No newline at end of file
diff --git a/vendor/github.com/go-playground/locales/LICENSE b/vendor/github.com/go-playground/locales/LICENSE
new file mode 100644
index 0000000..75854ac
--- /dev/null
+++ b/vendor/github.com/go-playground/locales/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Go Playground
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/go-playground/locales/README.md b/vendor/github.com/go-playground/locales/README.md
new file mode 100644
index 0000000..ba1b068
--- /dev/null
+++ b/vendor/github.com/go-playground/locales/README.md
@@ -0,0 +1,172 @@
+## locales
+![Project status](https://img.shields.io/badge/version-0.13.0-green.svg)
+[![Build Status](https://travis-ci.org/go-playground/locales.svg?branch=master)](https://travis-ci.org/go-playground/locales)
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/locales)](https://goreportcard.com/report/github.com/go-playground/locales)
+[![GoDoc](https://godoc.org/github.com/go-playground/locales?status.svg)](https://godoc.org/github.com/go-playground/locales)
+![License](https://img.shields.io/dub/l/vibe-d.svg)
+[![Gitter](https://badges.gitter.im/go-playground/locales.svg)](https://gitter.im/go-playground/locales?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
+
+Locales is a set of locales generated from the [Unicode CLDR Project](http://cldr.unicode.org/) which can be used independently or within
+an i18n package; these were built for use with, but not exclusive to, [Universal Translator](https://github.com/go-playground/universal-translator).
+
+Features
+--------
+- [x] Rules generated from the latest [CLDR](http://cldr.unicode.org/index/downloads) data, v31.0.1
+- [x] Contains Cardinal, Ordinal and Range Plural Rules
+- [x] Contains Month, Weekday and Timezone translations built in
+- [x] Contains Date & Time formatting functions
+- [x] Contains Number, Currency, Accounting and Percent formatting functions
+- [x] Supports the "Gregorian" calendar only ( my time isn't unlimited, had to draw the line somewhere )
+
+Full Tests
+--------------------
+I could sure use your help adding tests for every locale; it is a huge undertaking and I just don't have the free time to do it all at the moment;
+any help would be **greatly appreciated!!!!** please see [issue](https://github.com/go-playground/locales/issues/1) for details.
+
+Installation
+-----------
+
+Use go get
+
+```shell
+go get github.com/go-playground/locales
+```
+
+NOTES
+--------
+You'll notice most return types are []byte; this is because most of the time the results will be concatenated with a larger body
+of text, which can avoid some allocations if you're already appending to a byte array. Otherwise just cast to string.
+
+Usage
+-------
+```go
+package main
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/go-playground/locales/currency"
+ "github.com/go-playground/locales/en_CA"
+)
+
+func main() {
+
+ loc, _ := time.LoadLocation("America/Toronto")
+ datetime := time.Date(2016, 02, 03, 9, 0, 1, 0, loc)
+
+ l := en_CA.New()
+
+ // Dates
+ fmt.Println(l.FmtDateFull(datetime))
+ fmt.Println(l.FmtDateLong(datetime))
+ fmt.Println(l.FmtDateMedium(datetime))
+ fmt.Println(l.FmtDateShort(datetime))
+
+ // Times
+ fmt.Println(l.FmtTimeFull(datetime))
+ fmt.Println(l.FmtTimeLong(datetime))
+ fmt.Println(l.FmtTimeMedium(datetime))
+ fmt.Println(l.FmtTimeShort(datetime))
+
+ // Months Wide
+ fmt.Println(l.MonthWide(time.January))
+ fmt.Println(l.MonthWide(time.February))
+ fmt.Println(l.MonthWide(time.March))
+ // ...
+
+ // Months Abbreviated
+ fmt.Println(l.MonthAbbreviated(time.January))
+ fmt.Println(l.MonthAbbreviated(time.February))
+ fmt.Println(l.MonthAbbreviated(time.March))
+ // ...
+
+ // Months Narrow
+ fmt.Println(l.MonthNarrow(time.January))
+ fmt.Println(l.MonthNarrow(time.February))
+ fmt.Println(l.MonthNarrow(time.March))
+ // ...
+
+ // Weekdays Wide
+ fmt.Println(l.WeekdayWide(time.Sunday))
+ fmt.Println(l.WeekdayWide(time.Monday))
+ fmt.Println(l.WeekdayWide(time.Tuesday))
+ // ...
+
+ // Weekdays Abbreviated
+ fmt.Println(l.WeekdayAbbreviated(time.Sunday))
+ fmt.Println(l.WeekdayAbbreviated(time.Monday))
+ fmt.Println(l.WeekdayAbbreviated(time.Tuesday))
+ // ...
+
+ // Weekdays Short
+ fmt.Println(l.WeekdayShort(time.Sunday))
+ fmt.Println(l.WeekdayShort(time.Monday))
+ fmt.Println(l.WeekdayShort(time.Tuesday))
+ // ...
+
+ // Weekdays Narrow
+ fmt.Println(l.WeekdayNarrow(time.Sunday))
+ fmt.Println(l.WeekdayNarrow(time.Monday))
+ fmt.Println(l.WeekdayNarrow(time.Tuesday))
+ // ...
+
+ var f64 float64
+
+ f64 = -10356.4523
+
+ // Number
+ fmt.Println(l.FmtNumber(f64, 2))
+
+ // Currency
+ fmt.Println(l.FmtCurrency(f64, 2, currency.CAD))
+ fmt.Println(l.FmtCurrency(f64, 2, currency.USD))
+
+ // Accounting
+ fmt.Println(l.FmtAccounting(f64, 2, currency.CAD))
+ fmt.Println(l.FmtAccounting(f64, 2, currency.USD))
+
+ f64 = 78.12
+
+ // Percent
+ fmt.Println(l.FmtPercent(f64, 0))
+
+ // Plural Rules for locale, so you know what rules you must cover
+ fmt.Println(l.PluralsCardinal())
+ fmt.Println(l.PluralsOrdinal())
+
+ // Cardinal Plural Rules
+ fmt.Println(l.CardinalPluralRule(1, 0))
+ fmt.Println(l.CardinalPluralRule(1.0, 0))
+ fmt.Println(l.CardinalPluralRule(1.0, 1))
+ fmt.Println(l.CardinalPluralRule(3, 0))
+
+ // Ordinal Plural Rules
+ fmt.Println(l.OrdinalPluralRule(21, 0)) // 21st
+ fmt.Println(l.OrdinalPluralRule(22, 0)) // 22nd
+ fmt.Println(l.OrdinalPluralRule(33, 0)) // 33rd
+ fmt.Println(l.OrdinalPluralRule(34, 0)) // 34th
+
+ // Range Plural Rules
+ fmt.Println(l.RangePluralRule(1, 0, 1, 0)) // 1-1
+ fmt.Println(l.RangePluralRule(1, 0, 2, 0)) // 1-2
+ fmt.Println(l.RangePluralRule(5, 0, 8, 0)) // 5-8
+}
+```
+
+NOTES:
+-------
+These rules were generated from the [Unicode CLDR Project](http://cldr.unicode.org/); if you encounter any issues
+I strongly encourage contributing to the CLDR project to get the locale information corrected, and the fix will be
+included the next time these locales are regenerated.
+
+I do however realize that time constraints are often important and so there are two options:
+
+1. Create your own locale, copy, paste and modify, and ensure it complies with the `Translator` interface.
+2. Add an exception in the locale generation code directly, and once regenerated, the fix will be in place.
+
+Please do not make fixes inside the locale files; they WILL get overwritten when the locales are regenerated.
+
+License
+------
+Distributed under MIT License, please see license file in code for more details.
diff --git a/vendor/github.com/go-playground/locales/currency/currency.go b/vendor/github.com/go-playground/locales/currency/currency.go
new file mode 100644
index 0000000..cdaba59
--- /dev/null
+++ b/vendor/github.com/go-playground/locales/currency/currency.go
@@ -0,0 +1,308 @@
+package currency
+
+// Type is the currency type associated with the locales currency enum
+type Type int
+
+// locale currencies
+const (
+ ADP Type = iota
+ AED
+ AFA
+ AFN
+ ALK
+ ALL
+ AMD
+ ANG
+ AOA
+ AOK
+ AON
+ AOR
+ ARA
+ ARL
+ ARM
+ ARP
+ ARS
+ ATS
+ AUD
+ AWG
+ AZM
+ AZN
+ BAD
+ BAM
+ BAN
+ BBD
+ BDT
+ BEC
+ BEF
+ BEL
+ BGL
+ BGM
+ BGN
+ BGO
+ BHD
+ BIF
+ BMD
+ BND
+ BOB
+ BOL
+ BOP
+ BOV
+ BRB
+ BRC
+ BRE
+ BRL
+ BRN
+ BRR
+ BRZ
+ BSD
+ BTN
+ BUK
+ BWP
+ BYB
+ BYN
+ BYR
+ BZD
+ CAD
+ CDF
+ CHE
+ CHF
+ CHW
+ CLE
+ CLF
+ CLP
+ CNH
+ CNX
+ CNY
+ COP
+ COU
+ CRC
+ CSD
+ CSK
+ CUC
+ CUP
+ CVE
+ CYP
+ CZK
+ DDM
+ DEM
+ DJF
+ DKK
+ DOP
+ DZD
+ ECS
+ ECV
+ EEK
+ EGP
+ ERN
+ ESA
+ ESB
+ ESP
+ ETB
+ EUR
+ FIM
+ FJD
+ FKP
+ FRF
+ GBP
+ GEK
+ GEL
+ GHC
+ GHS
+ GIP
+ GMD
+ GNF
+ GNS
+ GQE
+ GRD
+ GTQ
+ GWE
+ GWP
+ GYD
+ HKD
+ HNL
+ HRD
+ HRK
+ HTG
+ HUF
+ IDR
+ IEP
+ ILP
+ ILR
+ ILS
+ INR
+ IQD
+ IRR
+ ISJ
+ ISK
+ ITL
+ JMD
+ JOD
+ JPY
+ KES
+ KGS
+ KHR
+ KMF
+ KPW
+ KRH
+ KRO
+ KRW
+ KWD
+ KYD
+ KZT
+ LAK
+ LBP
+ LKR
+ LRD
+ LSL
+ LTL
+ LTT
+ LUC
+ LUF
+ LUL
+ LVL
+ LVR
+ LYD
+ MAD
+ MAF
+ MCF
+ MDC
+ MDL
+ MGA
+ MGF
+ MKD
+ MKN
+ MLF
+ MMK
+ MNT
+ MOP
+ MRO
+ MTL
+ MTP
+ MUR
+ MVP
+ MVR
+ MWK
+ MXN
+ MXP
+ MXV
+ MYR
+ MZE
+ MZM
+ MZN
+ NAD
+ NGN
+ NIC
+ NIO
+ NLG
+ NOK
+ NPR
+ NZD
+ OMR
+ PAB
+ PEI
+ PEN
+ PES
+ PGK
+ PHP
+ PKR
+ PLN
+ PLZ
+ PTE
+ PYG
+ QAR
+ RHD
+ ROL
+ RON
+ RSD
+ RUB
+ RUR
+ RWF
+ SAR
+ SBD
+ SCR
+ SDD
+ SDG
+ SDP
+ SEK
+ SGD
+ SHP
+ SIT
+ SKK
+ SLL
+ SOS
+ SRD
+ SRG
+ SSP
+ STD
+ STN
+ SUR
+ SVC
+ SYP
+ SZL
+ THB
+ TJR
+ TJS
+ TMM
+ TMT
+ TND
+ TOP
+ TPE
+ TRL
+ TRY
+ TTD
+ TWD
+ TZS
+ UAH
+ UAK
+ UGS
+ UGX
+ USD
+ USN
+ USS
+ UYI
+ UYP
+ UYU
+ UZS
+ VEB
+ VEF
+ VND
+ VNN
+ VUV
+ WST
+ XAF
+ XAG
+ XAU
+ XBA
+ XBB
+ XBC
+ XBD
+ XCD
+ XDR
+ XEU
+ XFO
+ XFU
+ XOF
+ XPD
+ XPF
+ XPT
+ XRE
+ XSU
+ XTS
+ XUA
+ XXX
+ YDD
+ YER
+ YUD
+ YUM
+ YUN
+ YUR
+ ZAL
+ ZAR
+ ZMK
+ ZMW
+ ZRN
+ ZRZ
+ ZWD
+ ZWL
+ ZWR
+)
diff --git a/vendor/github.com/go-playground/locales/go.mod b/vendor/github.com/go-playground/locales/go.mod
new file mode 100644
index 0000000..34ab6f2
--- /dev/null
+++ b/vendor/github.com/go-playground/locales/go.mod
@@ -0,0 +1,5 @@
+module github.com/go-playground/locales
+
+go 1.13
+
+require golang.org/x/text v0.3.2
diff --git a/vendor/github.com/go-playground/locales/go.sum b/vendor/github.com/go-playground/locales/go.sum
new file mode 100644
index 0000000..63c9200
--- /dev/null
+++ b/vendor/github.com/go-playground/locales/go.sum
@@ -0,0 +1,3 @@
+golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
diff --git a/vendor/github.com/go-playground/locales/logo.png b/vendor/github.com/go-playground/locales/logo.png
new file mode 100644
index 0000000..3038276
Binary files /dev/null and b/vendor/github.com/go-playground/locales/logo.png differ
diff --git a/vendor/github.com/go-playground/locales/rules.go b/vendor/github.com/go-playground/locales/rules.go
new file mode 100644
index 0000000..9202900
--- /dev/null
+++ b/vendor/github.com/go-playground/locales/rules.go
@@ -0,0 +1,293 @@
+package locales
+
+import (
+ "strconv"
+ "time"
+
+ "github.com/go-playground/locales/currency"
+)
+
+// // ErrBadNumberValue is returned when the number passed for
+// // plural rule determination cannot be parsed
+// type ErrBadNumberValue struct {
+// NumberValue string
+// InnerError error
+// }
+
+// // Error returns ErrBadNumberValue error string
+// func (e *ErrBadNumberValue) Error() string {
+// return fmt.Sprintf("Invalid Number Value '%s' %s", e.NumberValue, e.InnerError)
+// }
+
+// var _ error = new(ErrBadNumberValue)
+
+// PluralRule denotes the type of plural rules
+type PluralRule int
+
+// PluralRule constants
+const (
+ PluralRuleUnknown PluralRule = iota
+ PluralRuleZero // zero
+ PluralRuleOne // one - singular
+ PluralRuleTwo // two - dual
+ PluralRuleFew // few - paucal
+ PluralRuleMany // many - also used for fractions if they have a separate class
+ PluralRuleOther // other - required—general plural form—also used if the language only has a single form
+)
+
+const (
+ pluralsString = "UnknownZeroOneTwoFewManyOther"
+)
+
+// Translator encapsulates an instance of a locale
+// NOTE: some values are returned as a []byte just in case the caller
+// wishes to add more and can help avoid allocations; otherwise just cast as string
+type Translator interface {
+
+ // The following Functions are for overriding, debugging or developing
+ // with a Translator Locale
+
+ // Locale returns the string value of the translator
+ Locale() string
+
+ // returns an array of cardinal plural rules associated
+ // with this translator
+ PluralsCardinal() []PluralRule
+
+ // returns an array of ordinal plural rules associated
+ // with this translator
+ PluralsOrdinal() []PluralRule
+
+ // returns an array of range plural rules associated
+ // with this translator
+ PluralsRange() []PluralRule
+
+ // returns the cardinal PluralRule given 'num' and digits/precision of 'v' for locale
+ CardinalPluralRule(num float64, v uint64) PluralRule
+
+ // returns the ordinal PluralRule given 'num' and digits/precision of 'v' for locale
+ OrdinalPluralRule(num float64, v uint64) PluralRule
+
+	// returns the range PluralRule given 'num1', 'num2' and digits/precision of 'v1' and 'v2' for locale
+ RangePluralRule(num1 float64, v1 uint64, num2 float64, v2 uint64) PluralRule
+
+ // returns the locales abbreviated month given the 'month' provided
+ MonthAbbreviated(month time.Month) string
+
+ // returns the locales abbreviated months
+ MonthsAbbreviated() []string
+
+ // returns the locales narrow month given the 'month' provided
+ MonthNarrow(month time.Month) string
+
+ // returns the locales narrow months
+ MonthsNarrow() []string
+
+ // returns the locales wide month given the 'month' provided
+ MonthWide(month time.Month) string
+
+ // returns the locales wide months
+ MonthsWide() []string
+
+ // returns the locales abbreviated weekday given the 'weekday' provided
+ WeekdayAbbreviated(weekday time.Weekday) string
+
+ // returns the locales abbreviated weekdays
+ WeekdaysAbbreviated() []string
+
+ // returns the locales narrow weekday given the 'weekday' provided
+ WeekdayNarrow(weekday time.Weekday) string
+
+	// returns the locales narrow weekdays
+ WeekdaysNarrow() []string
+
+ // returns the locales short weekday given the 'weekday' provided
+ WeekdayShort(weekday time.Weekday) string
+
+ // returns the locales short weekdays
+ WeekdaysShort() []string
+
+ // returns the locales wide weekday given the 'weekday' provided
+ WeekdayWide(weekday time.Weekday) string
+
+ // returns the locales wide weekdays
+ WeekdaysWide() []string
+
+	// The following Functions are common Formatting functions for the Translator's Locale
+
+ // returns 'num' with digits/precision of 'v' for locale and handles both Whole and Real numbers based on 'v'
+ FmtNumber(num float64, v uint64) string
+
+ // returns 'num' with digits/precision of 'v' for locale and handles both Whole and Real numbers based on 'v'
+ // NOTE: 'num' passed into FmtPercent is assumed to be in percent already
+ FmtPercent(num float64, v uint64) string
+
+ // returns the currency representation of 'num' with digits/precision of 'v' for locale
+ FmtCurrency(num float64, v uint64, currency currency.Type) string
+
+ // returns the currency representation of 'num' with digits/precision of 'v' for locale
+ // in accounting notation.
+ FmtAccounting(num float64, v uint64, currency currency.Type) string
+
+ // returns the short date representation of 't' for locale
+ FmtDateShort(t time.Time) string
+
+ // returns the medium date representation of 't' for locale
+ FmtDateMedium(t time.Time) string
+
+ // returns the long date representation of 't' for locale
+ FmtDateLong(t time.Time) string
+
+ // returns the full date representation of 't' for locale
+ FmtDateFull(t time.Time) string
+
+ // returns the short time representation of 't' for locale
+ FmtTimeShort(t time.Time) string
+
+ // returns the medium time representation of 't' for locale
+ FmtTimeMedium(t time.Time) string
+
+ // returns the long time representation of 't' for locale
+ FmtTimeLong(t time.Time) string
+
+ // returns the full time representation of 't' for locale
+ FmtTimeFull(t time.Time) string
+}
+
+// String returns the string value of PluralRule
+func (p PluralRule) String() string {
+
+ switch p {
+ case PluralRuleZero:
+ return pluralsString[7:11]
+ case PluralRuleOne:
+ return pluralsString[11:14]
+ case PluralRuleTwo:
+ return pluralsString[14:17]
+ case PluralRuleFew:
+ return pluralsString[17:20]
+ case PluralRuleMany:
+ return pluralsString[20:24]
+ case PluralRuleOther:
+ return pluralsString[24:]
+ default:
+ return pluralsString[:7]
+ }
+}
+
+//
+// Precision Notes:
+//
+// must specify a precision >= 0, and here is why https://play.golang.org/p/LyL90U0Vyh
+//
+// v := float64(3.141)
+// i := float64(int64(v))
+//
+// fmt.Println(v - i)
+//
+// or
+//
+// s := strconv.FormatFloat(v-i, 'f', -1, 64)
+// fmt.Println(s)
+//
+// these will not print what you'd expect: 0.14100000000000001
+// and so this library requires a precision to be specified, or
+// inaccurate plural rules could be applied.
+//
+//
+//
+// n - absolute value of the source number (integer and decimals).
+// i - integer digits of n.
+// v - number of visible fraction digits in n, with trailing zeros.
+// w - number of visible fraction digits in n, without trailing zeros.
+// f - visible fractional digits in n, with trailing zeros.
+// t - visible fractional digits in n, without trailing zeros.
+//
+//
+// Func(num float64, v uint64) // v = digits/precision and prevents -1 as a special case as this can lead to very unexpected behaviour, see precision notes above.
+//
+// n := math.Abs(num)
+// i := int64(n)
+// v := v
+//
+//
+// w := strconv.FormatFloat(num-float64(i), 'f', int(v), 64)  // then parse backwards on string until no more zeros...
+// f := strconv.FormatFloat(n, 'f', int(v), 64) // then turn everything after decimal into an int64
+// t := strconv.FormatFloat(n, 'f', int(v), 64) // then parse backwards on string until no more zeros...
+//
+//
+//
+// General Inclusion Rules
+// - v will always be available inherently
+// - all require n
+// - w requires i
+//
+
+// W returns the number of visible fraction digits in N, without trailing zeros.
+func W(n float64, v uint64) (w int64) {
+
+ s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64)
+
+	// will either be '0' or '0.xxxx', so if len is 1 then w will be zero
+	// otherwise parse backwards until the trailing zeros end
+	if len(s) != 1 {
+
+		s = s[2:]
+		end := 0
+
+		for i := len(s) - 1; i >= 0; i-- {
+			if s[i] != '0' {
+				end = i + 1
+				break
+			}
+		}
+
+		w = int64(len(s[:end]))
+ }
+
+ return
+}
+
+// F returns the visible fractional digits in N, with trailing zeros.
+func F(n float64, v uint64) (f int64) {
+
+ s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64)
+
+	// will either be '0' or '0.xxxx', so if len is 1 then f will be zero
+ // otherwise need to parse
+ if len(s) != 1 {
+
+ // ignoring error, because it can't fail as we generated
+ // the string internally from a real number
+ f, _ = strconv.ParseInt(s[2:], 10, 64)
+ }
+
+ return
+}
+
+// T returns the visible fractional digits in N, without trailing zeros.
+func T(n float64, v uint64) (t int64) {
+
+ s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64)
+
+	// will either be '0' or '0.xxxx', so if len is 1 then t will be zero
+	// otherwise parse backwards until the trailing zeros end
+	if len(s) != 1 {
+
+		s = s[2:]
+		end := 0
+
+		for i := len(s) - 1; i >= 0; i-- {
+			if s[i] != '0' {
+				end = i + 1
+				break
+			}
+		}
+
+		// ignoring error, because the string was generated internally from a
+		// real number; if the fraction was all zeros, s[:end] is empty and t stays 0
+ t, _ = strconv.ParseInt(s[:end], 10, 64)
+ }
+
+ return
+}
diff --git a/vendor/github.com/go-playground/universal-translator/.gitignore b/vendor/github.com/go-playground/universal-translator/.gitignore
new file mode 100644
index 0000000..bc4e07f
--- /dev/null
+++ b/vendor/github.com/go-playground/universal-translator/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+*.coverprofile
\ No newline at end of file
diff --git a/vendor/github.com/go-playground/universal-translator/.travis.yml b/vendor/github.com/go-playground/universal-translator/.travis.yml
new file mode 100644
index 0000000..39b8b92
--- /dev/null
+++ b/vendor/github.com/go-playground/universal-translator/.travis.yml
@@ -0,0 +1,27 @@
+language: go
+go:
+ - 1.13.4
+ - tip
+matrix:
+ allow_failures:
+ - go: tip
+
+notifications:
+ email:
+ recipients: dean.karn@gmail.com
+ on_success: change
+ on_failure: always
+
+before_install:
+ - go install github.com/mattn/goveralls
+
+# Only clone the most recent commit.
+git:
+ depth: 1
+
+script:
+ - go test -v -race -covermode=atomic -coverprofile=coverage.coverprofile ./...
+
+after_success: |
+ [ $TRAVIS_GO_VERSION = 1.13.4 ] &&
+ goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN
\ No newline at end of file
diff --git a/vendor/github.com/go-playground/universal-translator/LICENSE b/vendor/github.com/go-playground/universal-translator/LICENSE
new file mode 100644
index 0000000..8d8aba1
--- /dev/null
+++ b/vendor/github.com/go-playground/universal-translator/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Go Playground
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/go-playground/universal-translator/README.md b/vendor/github.com/go-playground/universal-translator/README.md
new file mode 100644
index 0000000..071f33a
--- /dev/null
+++ b/vendor/github.com/go-playground/universal-translator/README.md
@@ -0,0 +1,89 @@
+## universal-translator
+![Project status](https://img.shields.io/badge/version-0.17.0-green.svg)
+[![Build Status](https://travis-ci.org/go-playground/universal-translator.svg?branch=master)](https://travis-ci.org/go-playground/universal-translator)
+[![Coverage Status](https://coveralls.io/repos/github/go-playground/universal-translator/badge.svg)](https://coveralls.io/github/go-playground/universal-translator)
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/universal-translator)](https://goreportcard.com/report/github.com/go-playground/universal-translator)
+[![GoDoc](https://godoc.org/github.com/go-playground/universal-translator?status.svg)](https://godoc.org/github.com/go-playground/universal-translator)
+![License](https://img.shields.io/dub/l/vibe-d.svg)
+[![Gitter](https://badges.gitter.im/go-playground/universal-translator.svg)](https://gitter.im/go-playground/universal-translator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
+
+Universal Translator is an i18n Translator for Go/Golang using CLDR data + pluralization rules
+
+Why another i18n library?
+--------------------------
+None of the plural rules out there seem to be correct, including in the previous implementation of this package,
+so I took it upon myself to create [locales](https://github.com/go-playground/locales) for everyone to use; this package
+is a thin wrapper around [locales](https://github.com/go-playground/locales) in order to store and translate text for
+use in your applications.
+
+Features
+--------
+- [x] Rules generated from the [CLDR](http://cldr.unicode.org/index/downloads) data, v30.0.3
+- [x] Contains Cardinal, Ordinal and Range Plural Rules
+- [x] Contains Month, Weekday and Timezone translations built in
+- [x] Contains Date & Time formatting functions
+- [x] Contains Number, Currency, Accounting and Percent formatting functions
+- [x] Supports the "Gregorian" calendar only (my time isn't unlimited, had to draw the line somewhere)
+- [x] Support loading translations from files
+- [x] Exporting translations to file(s), mainly for getting them professionally translated
+- [ ] Code Generation for translation files -> Go code, i.e. after it has been professionally translated
+- [ ] Tests for all languages, I need help with this, please see [here](https://github.com/go-playground/locales/issues/1)
+
+Installation
+-----------
+
+Use go get
+
+```shell
+go get github.com/go-playground/universal-translator
+```
+
+Usage & Documentation
+-------
+
+Please see https://godoc.org/github.com/go-playground/universal-translator for usage docs
+
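+For a quick feel of the API, here is a minimal sketch (assuming the `en` package from [locales](https://github.com/go-playground/locales)) that registers a plain translation and renders it:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/go-playground/locales/en"
+	ut "github.com/go-playground/universal-translator"
+)
+
+func main() {
+	e := en.New()
+
+	// the first argument is the fallback locale, followed by all supported locales
+	uni := ut.New(e, e)
+
+	trans, _ := uni.GetTranslator("en")
+
+	// register a plain substitution translation
+	_ = trans.Add("welcome", "Welcome {0} to the site.", false)
+
+	s, _ := trans.T("welcome", "Joe")
+	fmt.Println(s) // Welcome Joe to the site.
+}
+```
+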
+##### Examples:
+
+- [Basic](https://github.com/go-playground/universal-translator/tree/master/_examples/basic)
+- [Full - no files](https://github.com/go-playground/universal-translator/tree/master/_examples/full-no-files)
+- [Full - with files](https://github.com/go-playground/universal-translator/tree/master/_examples/full-with-files)
+
+File formatting
+--------------
+All translation types (plain substitution, Cardinal, Ordinal and Range) can be contained within the same file(s);
+they are only separated for easy viewing.
+
+##### Examples:
+
+- [Formats](https://github.com/go-playground/universal-translator/tree/master/_examples/file-formats)
+
+##### Basic Makeup
+NOTE: not all fields are needed for all translation types, see [examples](https://github.com/go-playground/universal-translator/tree/master/_examples/file-formats)
+```json
+{
+ "locale": "en",
+ "key": "days-left",
+ "trans": "You have {0} day left.",
+ "type": "Cardinal",
+ "rule": "One",
+ "override": false
+}
+```
+|Field|Description|
+|---|---|
+|locale|The locale the translation is for.|
+|key|The translation key that will be used to store and look up each translation; normally a string or integer.|
+|trans|The actual translation text.|
+|type|The type of translation: Cardinal, Ordinal, Range or "" for a plain substitution (not required when plain is used).|
+|rule|The plural rule the translation is for, e.g. One, Two, Few, Many or Other (not required when plain is used).|
+|override|To override an existing translation that has already been registered, set this to 'true'. 99% of the time there is no need to define it.|
+
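+Once files in this shape exist they can be loaded back with `Import` (or `ImportByReader` for embedded assets). A sketch, assuming `uni` is the `*UniversalTranslator` from the sketch above and the files live in a hypothetical `./translations` directory:
+
+```go
+// load every .json file found under ./translations
+if err := uni.Import(ut.FormatJSON, "./translations"); err != nil {
+	log.Fatal(err)
+}
+
+// ensure no locale is missing a plural rule translation
+if err := uni.VerifyTranslations(); err != nil {
+	log.Fatal(err)
+}
+```
+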
+Help With Tests
+---------------
+To anyone interested in helping or contributing: I sure could use some help creating tests for each language.
+Please see issue [here](https://github.com/go-playground/locales/issues/1) for details.
+
+License
+------
+Distributed under MIT License, please see license file in code for more details.
diff --git a/vendor/github.com/go-playground/universal-translator/errors.go b/vendor/github.com/go-playground/universal-translator/errors.go
new file mode 100644
index 0000000..38b163b
--- /dev/null
+++ b/vendor/github.com/go-playground/universal-translator/errors.go
@@ -0,0 +1,148 @@
+package ut
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/go-playground/locales"
+)
+
+var (
+ // ErrUnknowTranslation indicates the translation could not be found
+ ErrUnknowTranslation = errors.New("Unknown Translation")
+)
+
+var _ error = new(ErrConflictingTranslation)
+var _ error = new(ErrRangeTranslation)
+var _ error = new(ErrOrdinalTranslation)
+var _ error = new(ErrCardinalTranslation)
+var _ error = new(ErrMissingPluralTranslation)
+var _ error = new(ErrExistingTranslator)
+
+// ErrExistingTranslator is the error representing a conflicting translator
+type ErrExistingTranslator struct {
+ locale string
+}
+
+// Error returns ErrExistingTranslator's internal error text
+func (e *ErrExistingTranslator) Error() string {
+ return fmt.Sprintf("error: conflicting translator for locale '%s'", e.locale)
+}
+
+// ErrConflictingTranslation is the error representing a conflicting translation
+type ErrConflictingTranslation struct {
+ locale string
+ key interface{}
+ rule locales.PluralRule
+ text string
+}
+
+// Error returns ErrConflictingTranslation's internal error text
+func (e *ErrConflictingTranslation) Error() string {
+
+ if _, ok := e.key.(string); !ok {
+ return fmt.Sprintf("error: conflicting key '%#v' rule '%s' with text '%s' for locale '%s', value being ignored", e.key, e.rule, e.text, e.locale)
+ }
+
+ return fmt.Sprintf("error: conflicting key '%s' rule '%s' with text '%s' for locale '%s', value being ignored", e.key, e.rule, e.text, e.locale)
+}
+
+// ErrRangeTranslation is the error representing a range translation error
+type ErrRangeTranslation struct {
+ text string
+}
+
+// Error returns ErrRangeTranslation's internal error text
+func (e *ErrRangeTranslation) Error() string {
+ return e.text
+}
+
+// ErrOrdinalTranslation is the error representing an ordinal translation error
+type ErrOrdinalTranslation struct {
+ text string
+}
+
+// Error returns ErrOrdinalTranslation's internal error text
+func (e *ErrOrdinalTranslation) Error() string {
+ return e.text
+}
+
+// ErrCardinalTranslation is the error representing a cardinal translation error
+type ErrCardinalTranslation struct {
+ text string
+}
+
+// Error returns ErrCardinalTranslation's internal error text
+func (e *ErrCardinalTranslation) Error() string {
+ return e.text
+}
+
+// ErrMissingPluralTranslation is the error signifying a missing translation given
+// the locales plural rules.
+type ErrMissingPluralTranslation struct {
+ locale string
+ key interface{}
+ rule locales.PluralRule
+ translationType string
+}
+
+// Error returns ErrMissingPluralTranslation's internal error text
+func (e *ErrMissingPluralTranslation) Error() string {
+
+ if _, ok := e.key.(string); !ok {
+ return fmt.Sprintf("error: missing '%s' plural rule '%s' for translation with key '%#v' and locale '%s'", e.translationType, e.rule, e.key, e.locale)
+ }
+
+ return fmt.Sprintf("error: missing '%s' plural rule '%s' for translation with key '%s' and locale '%s'", e.translationType, e.rule, e.key, e.locale)
+}
+
+// ErrMissingBracket is the error representing a missing bracket in a translation
+// eg. This is a {0 <-- missing ending '}'
+type ErrMissingBracket struct {
+ locale string
+ key interface{}
+ text string
+}
+
+// Error returns ErrMissingBracket error message
+func (e *ErrMissingBracket) Error() string {
+ return fmt.Sprintf("error: missing bracket '{}', in translation. locale: '%s' key: '%v' text: '%s'", e.locale, e.key, e.text)
+}
+
+// ErrBadParamSyntax is the error representing a bad parameter definition in a translation
+// eg. This is a {must-be-int}
+type ErrBadParamSyntax struct {
+ locale string
+ param string
+ key interface{}
+ text string
+}
+
+// Error returns ErrBadParamSyntax error message
+func (e *ErrBadParamSyntax) Error() string {
+ return fmt.Sprintf("error: bad parameter syntax, missing parameter '%s' in translation. locale: '%s' key: '%v' text: '%s'", e.param, e.locale, e.key, e.text)
+}
+
+// import/export errors
+
+// ErrMissingLocale is the error representing an expected locale that could
+// not be found aka locale not registered with the UniversalTranslator Instance
+type ErrMissingLocale struct {
+ locale string
+}
+
+// Error returns ErrMissingLocale's internal error text
+func (e *ErrMissingLocale) Error() string {
+ return fmt.Sprintf("error: locale '%s' not registered.", e.locale)
+}
+
+// ErrBadPluralDefinition is the error representing an incorrect plural definition
+// usually found within translations defined within files during the import process.
+type ErrBadPluralDefinition struct {
+ tl translation
+}
+
+// Error returns ErrBadPluralDefinition's internal error text
+func (e *ErrBadPluralDefinition) Error() string {
+ return fmt.Sprintf("error: bad plural definition '%#v'", e.tl)
+}
diff --git a/vendor/github.com/go-playground/universal-translator/go.mod b/vendor/github.com/go-playground/universal-translator/go.mod
new file mode 100644
index 0000000..8079590
--- /dev/null
+++ b/vendor/github.com/go-playground/universal-translator/go.mod
@@ -0,0 +1,5 @@
+module github.com/go-playground/universal-translator
+
+go 1.13
+
+require github.com/go-playground/locales v0.13.0
diff --git a/vendor/github.com/go-playground/universal-translator/go.sum b/vendor/github.com/go-playground/universal-translator/go.sum
new file mode 100644
index 0000000..cbbf324
--- /dev/null
+++ b/vendor/github.com/go-playground/universal-translator/go.sum
@@ -0,0 +1,4 @@
+github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
+github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
diff --git a/vendor/github.com/go-playground/universal-translator/import_export.go b/vendor/github.com/go-playground/universal-translator/import_export.go
new file mode 100644
index 0000000..7bd76f2
--- /dev/null
+++ b/vendor/github.com/go-playground/universal-translator/import_export.go
@@ -0,0 +1,274 @@
+package ut
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "io"
+
+ "github.com/go-playground/locales"
+)
+
+type translation struct {
+ Locale string `json:"locale"`
+ Key interface{} `json:"key"` // either string or integer
+ Translation string `json:"trans"`
+ PluralType string `json:"type,omitempty"`
+ PluralRule string `json:"rule,omitempty"`
+ OverrideExisting bool `json:"override,omitempty"`
+}
+
+const (
+ cardinalType = "Cardinal"
+ ordinalType = "Ordinal"
+ rangeType = "Range"
+)
+
+// ImportExportFormat is the format of the file import or export
+type ImportExportFormat uint8
+
+// supported Export Formats
+const (
+ FormatJSON ImportExportFormat = iota
+)
+
+// Export writes the translations out to a file on disk.
+//
+// NOTE: this currently only works with string or int translations keys.
+func (t *UniversalTranslator) Export(format ImportExportFormat, dirname string) error {
+
+	_, err := os.Stat(dirname)
+	if err != nil {
+
+ if !os.IsNotExist(err) {
+ return err
+ }
+
+ if err = os.MkdirAll(dirname, 0744); err != nil {
+ return err
+ }
+ }
+
+ // build up translations
+ var trans []translation
+ var b []byte
+ var ext string
+
+ for _, locale := range t.translators {
+
+ for k, v := range locale.(*translator).translations {
+ trans = append(trans, translation{
+ Locale: locale.Locale(),
+ Key: k,
+ Translation: v.text,
+ })
+ }
+
+ for k, pluralTrans := range locale.(*translator).cardinalTanslations {
+
+ for i, plural := range pluralTrans {
+
+ // leave enough for all plural rules
+ // but not all are set for all languages.
+ if plural == nil {
+ continue
+ }
+
+ trans = append(trans, translation{
+ Locale: locale.Locale(),
+ Key: k.(string),
+ Translation: plural.text,
+ PluralType: cardinalType,
+ PluralRule: locales.PluralRule(i).String(),
+ })
+ }
+ }
+
+ for k, pluralTrans := range locale.(*translator).ordinalTanslations {
+
+ for i, plural := range pluralTrans {
+
+ // leave enough for all plural rules
+ // but not all are set for all languages.
+ if plural == nil {
+ continue
+ }
+
+ trans = append(trans, translation{
+ Locale: locale.Locale(),
+ Key: k.(string),
+ Translation: plural.text,
+ PluralType: ordinalType,
+ PluralRule: locales.PluralRule(i).String(),
+ })
+ }
+ }
+
+ for k, pluralTrans := range locale.(*translator).rangeTanslations {
+
+ for i, plural := range pluralTrans {
+
+ // leave enough for all plural rules
+ // but not all are set for all languages.
+ if plural == nil {
+ continue
+ }
+
+ trans = append(trans, translation{
+ Locale: locale.Locale(),
+ Key: k.(string),
+ Translation: plural.text,
+ PluralType: rangeType,
+ PluralRule: locales.PluralRule(i).String(),
+ })
+ }
+ }
+
+ switch format {
+ case FormatJSON:
+ b, err = json.MarshalIndent(trans, "", " ")
+ ext = ".json"
+ }
+
+ if err != nil {
+ return err
+ }
+
+ err = ioutil.WriteFile(filepath.Join(dirname, fmt.Sprintf("%s%s", locale.Locale(), ext)), b, 0644)
+ if err != nil {
+ return err
+ }
+
+ trans = trans[0:0]
+ }
+
+ return nil
+}
+
+// Import reads the translations out of a file or directory on disk.
+//
+// NOTE: this currently only works with string or int translations keys.
+func (t *UniversalTranslator) Import(format ImportExportFormat, dirnameOrFilename string) error {
+
+ fi, err := os.Stat(dirnameOrFilename)
+ if err != nil {
+ return err
+ }
+
+ processFn := func(filename string) error {
+
+ f, err := os.Open(filename)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ return t.ImportByReader(format, f)
+ }
+
+ if !fi.IsDir() {
+ return processFn(dirnameOrFilename)
+ }
+
+ // recursively go through directory
+ walker := func(path string, info os.FileInfo, err error) error {
+
+ if info.IsDir() {
+ return nil
+ }
+
+ switch format {
+ case FormatJSON:
+ // skip non JSON files
+ if filepath.Ext(info.Name()) != ".json" {
+ return nil
+ }
+ }
+
+ return processFn(path)
+ }
+
+ return filepath.Walk(dirnameOrFilename, walker)
+}
+
+// ImportByReader imports the translations found within the contents read from the supplied reader.
+//
+// NOTE: generally used when assets have been embedded into the binary and are already in memory.
+func (t *UniversalTranslator) ImportByReader(format ImportExportFormat, reader io.Reader) error {
+
+ b, err := ioutil.ReadAll(reader)
+ if err != nil {
+ return err
+ }
+
+ var trans []translation
+
+ switch format {
+ case FormatJSON:
+ err = json.Unmarshal(b, &trans)
+ }
+
+ if err != nil {
+ return err
+ }
+
+ for _, tl := range trans {
+
+ locale, found := t.FindTranslator(tl.Locale)
+ if !found {
+ return &ErrMissingLocale{locale: tl.Locale}
+ }
+
+ pr := stringToPR(tl.PluralRule)
+
+ if pr == locales.PluralRuleUnknown {
+
+ err = locale.Add(tl.Key, tl.Translation, tl.OverrideExisting)
+ if err != nil {
+ return err
+ }
+
+ continue
+ }
+
+ switch tl.PluralType {
+ case cardinalType:
+ err = locale.AddCardinal(tl.Key, tl.Translation, pr, tl.OverrideExisting)
+ case ordinalType:
+ err = locale.AddOrdinal(tl.Key, tl.Translation, pr, tl.OverrideExisting)
+ case rangeType:
+ err = locale.AddRange(tl.Key, tl.Translation, pr, tl.OverrideExisting)
+ default:
+ return &ErrBadPluralDefinition{tl: tl}
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func stringToPR(s string) locales.PluralRule {
+
+ switch s {
+ case "One":
+ return locales.PluralRuleOne
+ case "Two":
+ return locales.PluralRuleTwo
+ case "Few":
+ return locales.PluralRuleFew
+ case "Many":
+ return locales.PluralRuleMany
+ case "Other":
+ return locales.PluralRuleOther
+ default:
+ return locales.PluralRuleUnknown
+ }
+}
diff --git a/vendor/github.com/go-playground/universal-translator/logo.png b/vendor/github.com/go-playground/universal-translator/logo.png
new file mode 100644
index 0000000..a37aa8c
Binary files /dev/null and b/vendor/github.com/go-playground/universal-translator/logo.png differ
diff --git a/vendor/github.com/go-playground/universal-translator/translator.go b/vendor/github.com/go-playground/universal-translator/translator.go
new file mode 100644
index 0000000..cfafce8
--- /dev/null
+++ b/vendor/github.com/go-playground/universal-translator/translator.go
@@ -0,0 +1,420 @@
+package ut
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/go-playground/locales"
+)
+
+const (
+ paramZero = "{0}"
+ paramOne = "{1}"
+ unknownTranslation = ""
+)
+
+// Translator is universal translator's per-locale translator
+// instance, a thin wrapper around a locales.Translator
+// instance providing some extra functionality
+type Translator interface {
+ locales.Translator
+
+ // adds a normal translation for a particular language/locale
+	// {#} is the only replacement type accepted and may be used ad infinitum
+ // eg. one: '{0} day left' other: '{0} days left'
+ Add(key interface{}, text string, override bool) error
+
+ // adds a cardinal plural translation for a particular language/locale
+ // {0} is the only replacement type accepted and only one variable is accepted as
+ // multiple cannot be used for a plural rule determination, unless it is a range;
+ // see AddRange below.
+ // eg. in locale 'en' one: '{0} day left' other: '{0} days left'
+ AddCardinal(key interface{}, text string, rule locales.PluralRule, override bool) error
+
+ // adds an ordinal plural translation for a particular language/locale
+ // {0} is the only replacement type accepted and only one variable is accepted as
+ // multiple cannot be used for a plural rule determination, unless it is a range;
+ // see AddRange below.
+ // eg. in locale 'en' one: '{0}st day of spring' other: '{0}nd day of spring'
+ // - 1st, 2nd, 3rd...
+ AddOrdinal(key interface{}, text string, rule locales.PluralRule, override bool) error
+
+ // adds a range plural translation for a particular language/locale
+ // {0} and {1} are the only replacement types accepted and only these are accepted.
+ // eg. in locale 'nl' one: '{0}-{1} day left' other: '{0}-{1} days left'
+ AddRange(key interface{}, text string, rule locales.PluralRule, override bool) error
+
+ // creates the translation for the locale given the 'key' and params passed in
+ T(key interface{}, params ...string) (string, error)
+
+ // creates the cardinal translation for the locale given the 'key', 'num' and 'digit' arguments
+ // and param passed in
+ C(key interface{}, num float64, digits uint64, param string) (string, error)
+
+ // creates the ordinal translation for the locale given the 'key', 'num' and 'digit' arguments
+ // and param passed in
+ O(key interface{}, num float64, digits uint64, param string) (string, error)
+
+ // creates the range translation for the locale given the 'key', 'num1', 'digit1', 'num2' and
+ // 'digit2' arguments and 'param1' and 'param2' passed in
+ R(key interface{}, num1 float64, digits1 uint64, num2 float64, digits2 uint64, param1, param2 string) (string, error)
+
+	// VerifyTranslations checks to ensure that no plural rules have been
+ // missed within the translations.
+ VerifyTranslations() error
+}
+
+var _ Translator = new(translator)
+var _ locales.Translator = new(translator)
+
+type translator struct {
+ locales.Translator
+ translations map[interface{}]*transText
+ cardinalTanslations map[interface{}][]*transText // array index is mapped to locales.PluralRule index + the locales.PluralRuleUnknown
+ ordinalTanslations map[interface{}][]*transText
+ rangeTanslations map[interface{}][]*transText
+}
+
+type transText struct {
+ text string
+ indexes []int
+}
+
+func newTranslator(trans locales.Translator) Translator {
+ return &translator{
+ Translator: trans,
+ translations: make(map[interface{}]*transText), // translation text broken up by byte index
+ cardinalTanslations: make(map[interface{}][]*transText),
+ ordinalTanslations: make(map[interface{}][]*transText),
+ rangeTanslations: make(map[interface{}][]*transText),
+ }
+}
+
+// Add adds a normal translation for a particular language/locale
+// {#} is the only replacement type accepted and may be used ad infinitum
+// eg. one: '{0} day left' other: '{0} days left'
+func (t *translator) Add(key interface{}, text string, override bool) error {
+
+ if _, ok := t.translations[key]; ok && !override {
+ return &ErrConflictingTranslation{locale: t.Locale(), key: key, text: text}
+ }
+
+ lb := strings.Count(text, "{")
+ rb := strings.Count(text, "}")
+
+ if lb != rb {
+ return &ErrMissingBracket{locale: t.Locale(), key: key, text: text}
+ }
+
+ trans := &transText{
+ text: text,
+ }
+
+ var idx int
+
+ for i := 0; i < lb; i++ {
+ s := "{" + strconv.Itoa(i) + "}"
+ idx = strings.Index(text, s)
+ if idx == -1 {
+ return &ErrBadParamSyntax{locale: t.Locale(), param: s, key: key, text: text}
+ }
+
+ trans.indexes = append(trans.indexes, idx)
+ trans.indexes = append(trans.indexes, idx+len(s))
+ }
+
+ t.translations[key] = trans
+
+ return nil
+}
+
+// AddCardinal adds a cardinal plural translation for a particular language/locale
+// {0} is the only replacement type accepted and only one variable is accepted as
+// multiple cannot be used for a plural rule determination, unless it is a range;
+// see AddRange below.
+// eg. in locale 'en' one: '{0} day left' other: '{0} days left'
+func (t *translator) AddCardinal(key interface{}, text string, rule locales.PluralRule, override bool) error {
+
+ var verified bool
+
+ // verify plural rule exists for locale
+ for _, pr := range t.PluralsCardinal() {
+ if pr == rule {
+ verified = true
+ break
+ }
+ }
+
+ if !verified {
+ return &ErrCardinalTranslation{text: fmt.Sprintf("error: cardinal plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)}
+ }
+
+ tarr, ok := t.cardinalTanslations[key]
+ if ok {
+ // verify not adding a conflicting record
+ if len(tarr) > 0 && tarr[rule] != nil && !override {
+ return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text}
+ }
+
+ } else {
+ tarr = make([]*transText, 7, 7)
+ t.cardinalTanslations[key] = tarr
+ }
+
+ trans := &transText{
+ text: text,
+ indexes: make([]int, 2, 2),
+ }
+
+ tarr[rule] = trans
+
+ idx := strings.Index(text, paramZero)
+ if idx == -1 {
+ tarr[rule] = nil
+ return &ErrCardinalTranslation{text: fmt.Sprintf("error: parameter '%s' not found, may want to use 'Add' instead of 'AddCardinal'. locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)}
+ }
+
+ trans.indexes[0] = idx
+ trans.indexes[1] = idx + len(paramZero)
+
+ return nil
+}
+
+// AddOrdinal adds an ordinal plural translation for a particular language/locale
+// {0} is the only replacement type accepted and only one variable is accepted as
+// multiple cannot be used for a plural rule determination, unless it is a range;
+// see AddRange below.
+// eg. in locale 'en' one: '{0}st day of spring' other: '{0}nd day of spring' - 1st, 2nd, 3rd...
+func (t *translator) AddOrdinal(key interface{}, text string, rule locales.PluralRule, override bool) error {
+
+ var verified bool
+
+ // verify plural rule exists for locale
+ for _, pr := range t.PluralsOrdinal() {
+ if pr == rule {
+ verified = true
+ break
+ }
+ }
+
+ if !verified {
+ return &ErrOrdinalTranslation{text: fmt.Sprintf("error: ordinal plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)}
+ }
+
+ tarr, ok := t.ordinalTanslations[key]
+ if ok {
+ // verify not adding a conflicting record
+ if len(tarr) > 0 && tarr[rule] != nil && !override {
+ return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text}
+ }
+
+ } else {
+ tarr = make([]*transText, 7, 7)
+ t.ordinalTanslations[key] = tarr
+ }
+
+ trans := &transText{
+ text: text,
+ indexes: make([]int, 2, 2),
+ }
+
+ tarr[rule] = trans
+
+ idx := strings.Index(text, paramZero)
+ if idx == -1 {
+ tarr[rule] = nil
+ return &ErrOrdinalTranslation{text: fmt.Sprintf("error: parameter '%s' not found, may want to use 'Add' instead of 'AddOrdinal'. locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)}
+ }
+
+ trans.indexes[0] = idx
+ trans.indexes[1] = idx + len(paramZero)
+
+ return nil
+}
+
+// AddRange adds a range plural translation for a particular language/locale
+// {0} and {1} are the only replacement types accepted and only these are accepted.
+// eg. in locale 'nl' one: '{0}-{1} day left' other: '{0}-{1} days left'
+func (t *translator) AddRange(key interface{}, text string, rule locales.PluralRule, override bool) error {
+
+ var verified bool
+
+ // verify plural rule exists for locale
+ for _, pr := range t.PluralsRange() {
+ if pr == rule {
+ verified = true
+ break
+ }
+ }
+
+ if !verified {
+ return &ErrRangeTranslation{text: fmt.Sprintf("error: range plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)}
+ }
+
+ tarr, ok := t.rangeTanslations[key]
+ if ok {
+ // verify not adding a conflicting record
+ if len(tarr) > 0 && tarr[rule] != nil && !override {
+ return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text}
+ }
+
+ } else {
+ tarr = make([]*transText, 7, 7)
+ t.rangeTanslations[key] = tarr
+ }
+
+ trans := &transText{
+ text: text,
+ indexes: make([]int, 4, 4),
+ }
+
+ tarr[rule] = trans
+
+ idx := strings.Index(text, paramZero)
+ if idx == -1 {
+ tarr[rule] = nil
+ return &ErrRangeTranslation{text: fmt.Sprintf("error: parameter '%s' not found, are you sure you're adding a Range Translation? locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)}
+ }
+
+ trans.indexes[0] = idx
+ trans.indexes[1] = idx + len(paramZero)
+
+ idx = strings.Index(text, paramOne)
+ if idx == -1 {
+ tarr[rule] = nil
+ return &ErrRangeTranslation{text: fmt.Sprintf("error: parameter '%s' not found, a Range Translation requires two parameters. locale: '%s' key: '%v' text: '%s'", paramOne, t.Locale(), key, text)}
+ }
+
+ trans.indexes[2] = idx
+ trans.indexes[3] = idx + len(paramOne)
+
+ return nil
+}
+
+// T creates the translation for the locale given the 'key' and params passed in
+func (t *translator) T(key interface{}, params ...string) (string, error) {
+
+ trans, ok := t.translations[key]
+ if !ok {
+ return unknownTranslation, ErrUnknowTranslation
+ }
+
+ b := make([]byte, 0, 64)
+
+ var start, end, count int
+
+ for i := 0; i < len(trans.indexes); i++ {
+ end = trans.indexes[i]
+ b = append(b, trans.text[start:end]...)
+ b = append(b, params[count]...)
+ i++
+ start = trans.indexes[i]
+ count++
+ }
+
+ b = append(b, trans.text[start:]...)
+
+ return string(b), nil
+}
+
+// C creates the cardinal translation for the locale given the 'key', 'num' and 'digit' arguments and param passed in
+func (t *translator) C(key interface{}, num float64, digits uint64, param string) (string, error) {
+
+ tarr, ok := t.cardinalTanslations[key]
+ if !ok {
+ return unknownTranslation, ErrUnknowTranslation
+ }
+
+ rule := t.CardinalPluralRule(num, digits)
+
+ trans := tarr[rule]
+
+ b := make([]byte, 0, 64)
+ b = append(b, trans.text[:trans.indexes[0]]...)
+ b = append(b, param...)
+ b = append(b, trans.text[trans.indexes[1]:]...)
+
+ return string(b), nil
+}
+
+// O creates the ordinal translation for the locale given the 'key', 'num' and 'digit' arguments and param passed in
+func (t *translator) O(key interface{}, num float64, digits uint64, param string) (string, error) {
+
+ tarr, ok := t.ordinalTanslations[key]
+ if !ok {
+ return unknownTranslation, ErrUnknowTranslation
+ }
+
+ rule := t.OrdinalPluralRule(num, digits)
+
+ trans := tarr[rule]
+
+ b := make([]byte, 0, 64)
+ b = append(b, trans.text[:trans.indexes[0]]...)
+ b = append(b, param...)
+ b = append(b, trans.text[trans.indexes[1]:]...)
+
+ return string(b), nil
+}
+
+// R creates the range translation for the locale given the 'key', 'num1', 'digit1', 'num2' and 'digit2' arguments
+// and 'param1' and 'param2' passed in
+func (t *translator) R(key interface{}, num1 float64, digits1 uint64, num2 float64, digits2 uint64, param1, param2 string) (string, error) {
+
+ tarr, ok := t.rangeTanslations[key]
+ if !ok {
+ return unknownTranslation, ErrUnknowTranslation
+ }
+
+ rule := t.RangePluralRule(num1, digits1, num2, digits2)
+
+ trans := tarr[rule]
+
+ b := make([]byte, 0, 64)
+ b = append(b, trans.text[:trans.indexes[0]]...)
+ b = append(b, param1...)
+ b = append(b, trans.text[trans.indexes[1]:trans.indexes[2]]...)
+ b = append(b, param2...)
+ b = append(b, trans.text[trans.indexes[3]:]...)
+
+ return string(b), nil
+}
+
+// VerifyTranslations checks to ensure that no plural rules have been
+// missed within the translations.
+func (t *translator) VerifyTranslations() error {
+
+ for k, v := range t.cardinalTanslations {
+
+ for _, rule := range t.PluralsCardinal() {
+
+ if v[rule] == nil {
+			return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "cardinal", rule: rule, key: k}
+ }
+ }
+ }
+
+ for k, v := range t.ordinalTanslations {
+
+ for _, rule := range t.PluralsOrdinal() {
+
+ if v[rule] == nil {
+ return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "ordinal", rule: rule, key: k}
+ }
+ }
+ }
+
+ for k, v := range t.rangeTanslations {
+
+ for _, rule := range t.PluralsRange() {
+
+ if v[rule] == nil {
+ return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "range", rule: rule, key: k}
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/go-playground/universal-translator/universal_translator.go b/vendor/github.com/go-playground/universal-translator/universal_translator.go
new file mode 100644
index 0000000..dbf707f
--- /dev/null
+++ b/vendor/github.com/go-playground/universal-translator/universal_translator.go
@@ -0,0 +1,113 @@
+package ut
+
+import (
+ "strings"
+
+ "github.com/go-playground/locales"
+)
+
+// UniversalTranslator holds all locale & translation data
+type UniversalTranslator struct {
+ translators map[string]Translator
+ fallback Translator
+}
+
+// New returns a new UniversalTranslator instance set with
+// the fallback locale and locales it should support
+func New(fallback locales.Translator, supportedLocales ...locales.Translator) *UniversalTranslator {
+
+ t := &UniversalTranslator{
+ translators: make(map[string]Translator),
+ }
+
+ for _, v := range supportedLocales {
+
+ trans := newTranslator(v)
+ t.translators[strings.ToLower(trans.Locale())] = trans
+
+		if fallback != nil && fallback.Locale() == v.Locale() {
+ t.fallback = trans
+ }
+ }
+
+ if t.fallback == nil && fallback != nil {
+ t.fallback = newTranslator(fallback)
+ }
+
+ return t
+}
+
+// FindTranslator tries to find a Translator based on an array of locales
+// and returns the first one it can find, otherwise returns the
+// fallback translator.
+func (t *UniversalTranslator) FindTranslator(locales ...string) (trans Translator, found bool) {
+
+ for _, locale := range locales {
+
+ if trans, found = t.translators[strings.ToLower(locale)]; found {
+ return
+ }
+ }
+
+ return t.fallback, false
+}
+
+// GetTranslator returns the specified translator for the given locale,
+// or fallback if not found
+func (t *UniversalTranslator) GetTranslator(locale string) (trans Translator, found bool) {
+
+ if trans, found = t.translators[strings.ToLower(locale)]; found {
+ return
+ }
+
+ return t.fallback, false
+}
+
+// GetFallback returns the fallback locale
+func (t *UniversalTranslator) GetFallback() Translator {
+ return t.fallback
+}
+
+// AddTranslator adds the supplied translator; if it already exists the override param
+// is checked and, if false, an error is returned, otherwise the translator is
+// overridden; if the fallback matches the supplied translator it is overridden as well
+// NOTE: this is normally only used when a translator is embedded within a library
+func (t *UniversalTranslator) AddTranslator(translator locales.Translator, override bool) error {
+
+ lc := strings.ToLower(translator.Locale())
+ _, ok := t.translators[lc]
+ if ok && !override {
+ return &ErrExistingTranslator{locale: translator.Locale()}
+ }
+
+ trans := newTranslator(translator)
+
+ if t.fallback.Locale() == translator.Locale() {
+
+ // because it's optional to have a fallback, I don't impose that limitation
+ // don't know why you wouldn't but...
+ if !override {
+ return &ErrExistingTranslator{locale: translator.Locale()}
+ }
+
+ t.fallback = trans
+ }
+
+ t.translators[lc] = trans
+
+ return nil
+}
+
+// VerifyTranslations runs through all locales and identifies any issues
+// eg. missing plural rules for a locale
+func (t *UniversalTranslator) VerifyTranslations() (err error) {
+
+ for _, trans := range t.translators {
+ err = trans.VerifyTranslations()
+ if err != nil {
+ return
+ }
+ }
+
+ return
+}
diff --git a/vendor/github.com/go-playground/validator/v10/.gitignore b/vendor/github.com/go-playground/validator/v10/.gitignore
new file mode 100644
index 0000000..6e43fac
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/.gitignore
@@ -0,0 +1,30 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+bin
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+*.test
+*.out
+*.txt
+cover.html
+README.html
diff --git a/vendor/github.com/go-playground/validator/v10/.travis.yml b/vendor/github.com/go-playground/validator/v10/.travis.yml
new file mode 100644
index 0000000..85a7be3
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/.travis.yml
@@ -0,0 +1,29 @@
+language: go
+go:
+ - 1.15.2
+ - tip
+matrix:
+ allow_failures:
+ - go: tip
+
+notifications:
+ email:
+ recipients: dean.karn@gmail.com
+ on_success: change
+ on_failure: always
+
+before_install:
+ - go install github.com/mattn/goveralls
+ - mkdir -p $GOPATH/src/gopkg.in
+ - ln -s $GOPATH/src/github.com/$TRAVIS_REPO_SLUG $GOPATH/src/gopkg.in/validator.v9
+
+# Only clone the most recent commit.
+git:
+ depth: 1
+
+script:
+ - go test -v -race -covermode=atomic -coverprofile=coverage.coverprofile ./...
+
+after_success: |
+ [ $TRAVIS_GO_VERSION = 1.15.2 ] &&
+ goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN
diff --git a/vendor/github.com/go-playground/validator/v10/LICENSE b/vendor/github.com/go-playground/validator/v10/LICENSE
new file mode 100644
index 0000000..6a2ae9a
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Dean Karn
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/go-playground/validator/v10/Makefile b/vendor/github.com/go-playground/validator/v10/Makefile
new file mode 100644
index 0000000..19c91ed
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/Makefile
@@ -0,0 +1,18 @@
+GOCMD=GO111MODULE=on go
+
+linters-install:
+ @golangci-lint --version >/dev/null 2>&1 || { \
+ echo "installing linting tools..."; \
+ curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s v1.21.0; \
+ }
+
+lint: linters-install
+ $(PWD)/bin/golangci-lint run
+
+test:
+ $(GOCMD) test -cover -race ./...
+
+bench:
+ $(GOCMD) test -bench=. -benchmem ./...
+
+.PHONY: test lint linters-install
\ No newline at end of file
diff --git a/vendor/github.com/go-playground/validator/v10/README.md b/vendor/github.com/go-playground/validator/v10/README.md
new file mode 100644
index 0000000..04fbb3c
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/README.md
@@ -0,0 +1,299 @@
+Package validator
+================
+[![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+![Project status](https://img.shields.io/badge/version-10.4.1-green.svg)
+[![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator)
+[![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator)
+[![GoDoc](https://godoc.org/github.com/go-playground/validator?status.svg)](https://pkg.go.dev/github.com/go-playground/validator/v10)
+![License](https://img.shields.io/dub/l/vibe-d.svg)
+
+Package validator implements value validations for structs and individual fields based on tags.
+
+It has the following **unique** features:
+
+- Cross Field and Cross Struct validations by using validation tags or custom validators.
+- Slice, Array and Map diving, which allows any or all levels of a multidimensional field to be validated.
+- Ability to dive into both map keys and values for validation (see the sketch after this list)
+- Handles type interface{} by determining its underlying type prior to validation.
+- Handles custom field types such as sql driver Valuer, see [Valuer](https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29)
+- Alias validation tags, which allows for mapping of several validations to a single tag for easier defining of validations on structs
+- Extraction of custom defined Field Name e.g. can specify to extract the JSON name while validating and have it available in the resulting FieldError
+- Customizable i18n aware error messages.
+- Default validator for the [gin](https://github.com/gin-gonic/gin) web framework; upgrading from v8 to v9 in gin see [here](https://github.com/go-playground/validator/tree/master/_examples/gin-upgrading-overriding)
+
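+As an illustrative sketch of the diving features above (a hypothetical struct, not from this package):
+
+```go
+type User struct {
+	// dive validates each element of the slice as an email
+	Emails []string `validate:"required,dive,email"`
+
+	// keys ... endkeys validates the map keys; the remaining tags apply to the values
+	Attrs map[string]string `validate:"dive,keys,alphanum,endkeys,required"`
+}
+```
+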
+Installation
+------------
+
+Use go get.
+
+ go get github.com/go-playground/validator/v10
+
+Then import the validator package into your own code.
+
+ import "github.com/go-playground/validator/v10"
+
+Error Return Value
+-------
+
+Validation functions return type error
+
+They return type error to avoid the issue discussed in the following, where err is always != nil:
+
+* http://stackoverflow.com/a/29138676/3158232
+* https://github.com/go-playground/validator/issues/134
+
+Validator returns only InvalidValidationError for bad validation input, nil or ValidationErrors as type error; so, in your code, all you need to do is check if the error returned is not nil and, if it's not, check whether it is an InvalidValidationError (if necessary; most of the time it isn't), then type-cast it to type ValidationErrors like so:
+
+```go
+err := validate.Struct(mystruct)
+validationErrors := err.(validator.ValidationErrors)
+```
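+
+A slightly fuller sketch of that flow, with the nil and InvalidValidationError checks spelled out (`validate` and `mystruct` assumed from the surrounding context):
+
+```go
+err := validate.Struct(mystruct)
+if err != nil {
+
+	// this check is only needed when your code could produce an invalid
+	// value for validation, such as an interface holding a nil value
+	if _, ok := err.(*validator.InvalidValidationError); ok {
+		fmt.Println(err)
+		return
+	}
+
+	for _, fieldErr := range err.(validator.ValidationErrors) {
+		fmt.Println(fieldErr.Namespace(), fieldErr.Tag())
+	}
+}
+```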
+
+Usage and documentation
+------
+
+Please see https://godoc.org/github.com/go-playground/validator for detailed usage docs.
+
+##### Examples:
+
+- [Simple](https://github.com/go-playground/validator/blob/master/_examples/simple/main.go)
+- [Custom Field Types](https://github.com/go-playground/validator/blob/master/_examples/custom/main.go)
+- [Struct Level](https://github.com/go-playground/validator/blob/master/_examples/struct-level/main.go)
+- [Translations & Custom Errors](https://github.com/go-playground/validator/blob/master/_examples/translations/main.go)
+- [Gin upgrade and/or override validator](https://github.com/go-playground/validator/tree/v9/_examples/gin-upgrading-overriding)
+- [wash - an example application putting it all together](https://github.com/bluesuncorp/wash)
+
+Baked-in Validations
+------
+
+### Fields:
+
+| Tag | Description |
+| - | - |
+| eqcsfield | Field Equals Another Field (relative)|
+| eqfield | Field Equals Another Field |
+| fieldcontains | NOT DOCUMENTED IN doc.go |
+| fieldexcludes | NOT DOCUMENTED IN doc.go |
+| gtcsfield | Field Greater Than Another Relative Field |
+| gtecsfield | Field Greater Than or Equal To Another Relative Field |
+| gtefield | Field Greater Than or Equal To Another Field |
+| gtfield | Field Greater Than Another Field |
+| ltcsfield | Less Than Another Relative Field |
+| ltecsfield | Less Than or Equal To Another Relative Field |
+| ltefield | Less Than or Equal To Another Field |
+| ltfield | Less Than Another Field |
+| necsfield | Field Does Not Equal Another Field (relative) |
+| nefield | Field Does Not Equal Another Field |
+
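+For example, a hypothetical sketch using the cross-field tags above:
+
+```go
+type ChangePassword struct {
+	Password        string `validate:"required,min=8"`
+	// eqfield compares against another field in the same struct
+	ConfirmPassword string `validate:"required,eqfield=Password"`
+}
+```
+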
+### Network:
+
+| Tag | Description |
+| - | - |
+| cidr | Classless Inter-Domain Routing CIDR |
+| cidrv4 | Classless Inter-Domain Routing CIDRv4 |
+| cidrv6 | Classless Inter-Domain Routing CIDRv6 |
+| datauri | Data URL |
+| fqdn | Fully Qualified Domain Name (FQDN) |
+| hostname | Hostname RFC 952 |
+| hostname_port | HostPort |
+| hostname_rfc1123 | Hostname RFC 1123 |
+| ip | Internet Protocol Address IP |
+| ip4_addr | Internet Protocol Address IPv4 |
+| ip6_addr |Internet Protocol Address IPv6 |
+| ip_addr | Internet Protocol Address IP |
+| ipv4 | Internet Protocol Address IPv4 |
+| ipv6 | Internet Protocol Address IPv6 |
+| mac | Media Access Control Address MAC |
+| tcp4_addr | Transmission Control Protocol Address TCPv4 |
+| tcp6_addr | Transmission Control Protocol Address TCPv6 |
+| tcp_addr | Transmission Control Protocol Address TCP |
+| udp4_addr | User Datagram Protocol Address UDPv4 |
+| udp6_addr | User Datagram Protocol Address UDPv6 |
+| udp_addr | User Datagram Protocol Address UDP |
+| unix_addr | Unix domain socket end point Address |
+| uri | URI String |
+| url | URL String |
+| url_encoded | URL Encoded |
+| urn_rfc2141 | URN RFC 2141 String |
+
+### Strings:
+
+| Tag | Description |
+| - | - |
+| alpha | Alpha Only |
+| alphanum | Alphanumeric |
+| alphanumunicode | Alphanumeric Unicode |
+| alphaunicode | Alpha Unicode |
+| ascii | ASCII |
+| contains | Contains |
+| containsany | Contains Any |
+| containsrune | Contains Rune |
+| endswith | Ends With |
+| lowercase | Lowercase |
+| multibyte | Multi-Byte Characters |
+| number | NOT DOCUMENTED IN doc.go |
+| numeric | Numeric |
+| printascii | Printable ASCII |
+| startswith | Starts With |
+| uppercase | Uppercase |
+
+### Format:
+| Tag | Description |
+| - | - |
+| base64 | Base64 String |
+| base64url | Base64URL String |
+| btc_addr | Bitcoin Address |
+| btc_addr_bech32 | Bitcoin Bech32 Address (segwit) |
+| datetime | Datetime |
+| e164 | e164 formatted phone number |
+| email | E-mail String |
+| eth_addr | Ethereum Address |
+| hexadecimal | Hexadecimal String |
+| hexcolor | Hexcolor String |
+| hsl | HSL String |
+| hsla | HSLA String |
+| html | HTML Tags |
+| html_encoded | HTML Encoded |
+| isbn | International Standard Book Number |
+| isbn10 | International Standard Book Number 10 |
+| isbn13 | International Standard Book Number 13 |
+| json | JSON |
+| latitude | Latitude |
+| longitude | Longitude |
+| rgb | RGB String |
+| rgba | RGBA String |
+| ssn | Social Security Number SSN |
+| uuid | Universally Unique Identifier UUID |
+| uuid3 | Universally Unique Identifier UUID v3 |
+| uuid3_rfc4122 | Universally Unique Identifier UUID v3 RFC4122 |
+| uuid4 | Universally Unique Identifier UUID v4 |
+| uuid4_rfc4122 | Universally Unique Identifier UUID v4 RFC4122 |
+| uuid5 | Universally Unique Identifier UUID v5 |
+| uuid5_rfc4122 | Universally Unique Identifier UUID v5 RFC4122 |
+| uuid_rfc4122 | Universally Unique Identifier UUID RFC4122 |
+
+### Comparisons:
+| Tag | Description |
+| - | - |
+| eq | Equals |
+| gt | Greater than|
+| gte | Greater than or equal |
+| lt | Less Than |
+| lte | Less Than or Equal |
+| ne | Not Equal |
+
+### Other:
+| Tag | Description |
+| - | - |
+| dir | Directory |
+| endswith | Ends With |
+| excludes | Excludes |
+| excludesall | Excludes All |
+| excludesrune | Excludes Rune |
+| file | File path |
+| isdefault | Is Default |
+| len | Length |
+| max | Maximum |
+| min | Minimum |
+| oneof | One Of |
+| required | Required |
+| required_if | Required If |
+| required_unless | Required Unless |
+| required_with | Required With |
+| required_with_all | Required With All |
+| required_without | Required Without |
+| required_without_all | Required Without All |
+| excluded_with | Excluded With |
+| excluded_with_all | Excluded With All |
+| excluded_without | Excluded Without |
+| excluded_without_all | Excluded Without All |
+| unique | Unique |
+
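+A hypothetical sketch combining several of the tags above:
+
+```go
+type Contact struct {
+	// at least one way to reach the contact must be provided
+	Email string `validate:"required_without=Phone,omitempty,email"`
+	Phone string `validate:"required_without=Email,omitempty,e164"`
+
+	// must be exactly one of the listed values
+	Kind string `validate:"oneof=personal work"`
+}
+```
+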
+Benchmarks
+------
+###### Run on MacBook Pro (15-inch, 2017) go version go1.10.2 darwin/amd64
+```go
+goos: darwin
+goarch: amd64
+pkg: github.com/go-playground/validator
+BenchmarkFieldSuccess-8 20000000 83.6 ns/op 0 B/op 0 allocs/op
+BenchmarkFieldSuccessParallel-8 50000000 26.8 ns/op 0 B/op 0 allocs/op
+BenchmarkFieldFailure-8 5000000 291 ns/op 208 B/op 4 allocs/op
+BenchmarkFieldFailureParallel-8 20000000 107 ns/op 208 B/op 4 allocs/op
+BenchmarkFieldArrayDiveSuccess-8 2000000 623 ns/op 201 B/op 11 allocs/op
+BenchmarkFieldArrayDiveSuccessParallel-8 10000000 237 ns/op 201 B/op 11 allocs/op
+BenchmarkFieldArrayDiveFailure-8 2000000 859 ns/op 412 B/op 16 allocs/op
+BenchmarkFieldArrayDiveFailureParallel-8 5000000 335 ns/op 413 B/op 16 allocs/op
+BenchmarkFieldMapDiveSuccess-8 1000000 1292 ns/op 432 B/op 18 allocs/op
+BenchmarkFieldMapDiveSuccessParallel-8 3000000 467 ns/op 432 B/op 18 allocs/op
+BenchmarkFieldMapDiveFailure-8 1000000 1082 ns/op 512 B/op 16 allocs/op
+BenchmarkFieldMapDiveFailureParallel-8 5000000 425 ns/op 512 B/op 16 allocs/op
+BenchmarkFieldMapDiveWithKeysSuccess-8 1000000 1539 ns/op 480 B/op 21 allocs/op
+BenchmarkFieldMapDiveWithKeysSuccessParallel-8 3000000 613 ns/op 480 B/op 21 allocs/op
+BenchmarkFieldMapDiveWithKeysFailure-8 1000000 1413 ns/op 721 B/op 21 allocs/op
+BenchmarkFieldMapDiveWithKeysFailureParallel-8 3000000 575 ns/op 721 B/op 21 allocs/op
+BenchmarkFieldCustomTypeSuccess-8 10000000 216 ns/op 32 B/op 2 allocs/op
+BenchmarkFieldCustomTypeSuccessParallel-8 20000000 82.2 ns/op 32 B/op 2 allocs/op
+BenchmarkFieldCustomTypeFailure-8 5000000 274 ns/op 208 B/op 4 allocs/op
+BenchmarkFieldCustomTypeFailureParallel-8 20000000 116 ns/op 208 B/op 4 allocs/op
+BenchmarkFieldOrTagSuccess-8 2000000 740 ns/op 16 B/op 1 allocs/op
+BenchmarkFieldOrTagSuccessParallel-8 3000000 474 ns/op 16 B/op 1 allocs/op
+BenchmarkFieldOrTagFailure-8 3000000 471 ns/op 224 B/op 5 allocs/op
+BenchmarkFieldOrTagFailureParallel-8 3000000 414 ns/op 224 B/op 5 allocs/op
+BenchmarkStructLevelValidationSuccess-8 10000000 213 ns/op 32 B/op 2 allocs/op
+BenchmarkStructLevelValidationSuccessParallel-8 20000000 91.8 ns/op 32 B/op 2 allocs/op
+BenchmarkStructLevelValidationFailure-8 3000000 473 ns/op 304 B/op 8 allocs/op
+BenchmarkStructLevelValidationFailureParallel-8 10000000 234 ns/op 304 B/op 8 allocs/op
+BenchmarkStructSimpleCustomTypeSuccess-8 5000000 385 ns/op 32 B/op 2 allocs/op
+BenchmarkStructSimpleCustomTypeSuccessParallel-8 10000000 161 ns/op 32 B/op 2 allocs/op
+BenchmarkStructSimpleCustomTypeFailure-8 2000000 640 ns/op 424 B/op 9 allocs/op
+BenchmarkStructSimpleCustomTypeFailureParallel-8 5000000 318 ns/op 440 B/op 10 allocs/op
+BenchmarkStructFilteredSuccess-8 2000000 597 ns/op 288 B/op 9 allocs/op
+BenchmarkStructFilteredSuccessParallel-8 10000000 266 ns/op 288 B/op 9 allocs/op
+BenchmarkStructFilteredFailure-8 3000000 454 ns/op 256 B/op 7 allocs/op
+BenchmarkStructFilteredFailureParallel-8 10000000 214 ns/op 256 B/op 7 allocs/op
+BenchmarkStructPartialSuccess-8 3000000 502 ns/op 256 B/op 6 allocs/op
+BenchmarkStructPartialSuccessParallel-8 10000000 225 ns/op 256 B/op 6 allocs/op
+BenchmarkStructPartialFailure-8 2000000 702 ns/op 480 B/op 11 allocs/op
+BenchmarkStructPartialFailureParallel-8 5000000 329 ns/op 480 B/op 11 allocs/op
+BenchmarkStructExceptSuccess-8 2000000 793 ns/op 496 B/op 12 allocs/op
+BenchmarkStructExceptSuccessParallel-8 10000000 193 ns/op 240 B/op 5 allocs/op
+BenchmarkStructExceptFailure-8 2000000 639 ns/op 464 B/op 10 allocs/op
+BenchmarkStructExceptFailureParallel-8 5000000 300 ns/op 464 B/op 10 allocs/op
+BenchmarkStructSimpleCrossFieldSuccess-8 3000000 417 ns/op 72 B/op 3 allocs/op
+BenchmarkStructSimpleCrossFieldSuccessParallel-8 10000000 163 ns/op 72 B/op 3 allocs/op
+BenchmarkStructSimpleCrossFieldFailure-8 2000000 645 ns/op 304 B/op 8 allocs/op
+BenchmarkStructSimpleCrossFieldFailureParallel-8 5000000 285 ns/op 304 B/op 8 allocs/op
+BenchmarkStructSimpleCrossStructCrossFieldSuccess-8 3000000 588 ns/op 80 B/op 4 allocs/op
+BenchmarkStructSimpleCrossStructCrossFieldSuccessParallel-8 10000000 221 ns/op 80 B/op 4 allocs/op
+BenchmarkStructSimpleCrossStructCrossFieldFailure-8 2000000 868 ns/op 320 B/op 9 allocs/op
+BenchmarkStructSimpleCrossStructCrossFieldFailureParallel-8 5000000 337 ns/op 320 B/op 9 allocs/op
+BenchmarkStructSimpleSuccess-8 5000000 260 ns/op 0 B/op 0 allocs/op
+BenchmarkStructSimpleSuccessParallel-8 20000000 90.6 ns/op 0 B/op 0 allocs/op
+BenchmarkStructSimpleFailure-8 2000000 619 ns/op 424 B/op 9 allocs/op
+BenchmarkStructSimpleFailureParallel-8 5000000 296 ns/op 424 B/op 9 allocs/op
+BenchmarkStructComplexSuccess-8 1000000 1454 ns/op 128 B/op 8 allocs/op
+BenchmarkStructComplexSuccessParallel-8 3000000 579 ns/op 128 B/op 8 allocs/op
+BenchmarkStructComplexFailure-8 300000 4140 ns/op 3041 B/op 53 allocs/op
+BenchmarkStructComplexFailureParallel-8 1000000 2127 ns/op 3041 B/op 53 allocs/op
+BenchmarkOneof-8 10000000 140 ns/op 0 B/op 0 allocs/op
+BenchmarkOneofParallel-8 20000000 70.1 ns/op 0 B/op 0 allocs/op
+```
+
+Complementary Software
+----------------------
+
+Here is a list of software that complements using this library either pre or post validation.
+
+* [form](https://github.com/go-playground/form) - Decodes url.Values into Go value(s) and Encodes Go value(s) into url.Values. Dual Array and Full map support.
+* [mold](https://github.com/go-playground/mold) - A general library to help modify or set data within data structures and other objects
+
+How to Contribute
+------
+
+Make a pull request...
+
+License
+------
+Distributed under MIT License, please see license file within the code for more details.
diff --git a/vendor/github.com/go-playground/validator/v10/baked_in.go b/vendor/github.com/go-playground/validator/v10/baked_in.go
new file mode 100644
index 0000000..6ce762d
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/baked_in.go
@@ -0,0 +1,2285 @@
+package validator
+
+import (
+ "bytes"
+ "context"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "net"
+ "net/url"
+ "os"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+ "unicode/utf8"
+
+ "golang.org/x/crypto/sha3"
+
+ urn "github.com/leodido/go-urn"
+)
+
+// Func accepts a FieldLevel interface for all validation needs. The return
+// value should be true when validation succeeds.
+type Func func(fl FieldLevel) bool
+
+// FuncCtx accepts a context.Context and FieldLevel interface for all
+// validation needs. The return value should be true when validation succeeds.
+type FuncCtx func(ctx context.Context, fl FieldLevel) bool
+
+// wrapFunc wraps a normal Func to make it compatible with FuncCtx
+func wrapFunc(fn Func) FuncCtx {
+ if fn == nil {
+ return nil // be sure not to wrap a bad function.
+ }
+ return func(ctx context.Context, fl FieldLevel) bool {
+ return fn(fl)
+ }
+}
+
+var (
+ restrictedTags = map[string]struct{}{
+ diveTag: {},
+ keysTag: {},
+ endKeysTag: {},
+ structOnlyTag: {},
+ omitempty: {},
+ skipValidationTag: {},
+ utf8HexComma: {},
+ utf8Pipe: {},
+ noStructLevelTag: {},
+ requiredTag: {},
+ isdefault: {},
+ }
+
+	// bakedInAliases is a default mapping of a single validation tag that
+ // defines a common or complex set of validation(s) to simplify
+ // adding validation to structs.
+ bakedInAliases = map[string]string{
+ "iscolor": "hexcolor|rgb|rgba|hsl|hsla",
+ "country_code": "iso3166_1_alpha2|iso3166_1_alpha3|iso3166_1_alpha_numeric",
+ }
+
+	// bakedInValidators is the default map of ValidationFunc;
+	// you can add, remove or even replace items to suit your needs,
+ // or even disregard and use your own map if so desired.
+ bakedInValidators = map[string]Func{
+ "required": hasValue,
+ "required_if": requiredIf,
+ "required_unless": requiredUnless,
+ "required_with": requiredWith,
+ "required_with_all": requiredWithAll,
+ "required_without": requiredWithout,
+ "required_without_all": requiredWithoutAll,
+ "excluded_with": excludedWith,
+ "excluded_with_all": excludedWithAll,
+ "excluded_without": excludedWithout,
+ "excluded_without_all": excludedWithoutAll,
+ "isdefault": isDefault,
+ "len": hasLengthOf,
+ "min": hasMinOf,
+ "max": hasMaxOf,
+ "eq": isEq,
+ "ne": isNe,
+ "lt": isLt,
+ "lte": isLte,
+ "gt": isGt,
+ "gte": isGte,
+ "eqfield": isEqField,
+ "eqcsfield": isEqCrossStructField,
+ "necsfield": isNeCrossStructField,
+ "gtcsfield": isGtCrossStructField,
+ "gtecsfield": isGteCrossStructField,
+ "ltcsfield": isLtCrossStructField,
+ "ltecsfield": isLteCrossStructField,
+ "nefield": isNeField,
+ "gtefield": isGteField,
+ "gtfield": isGtField,
+ "ltefield": isLteField,
+ "ltfield": isLtField,
+ "fieldcontains": fieldContains,
+ "fieldexcludes": fieldExcludes,
+ "alpha": isAlpha,
+ "alphanum": isAlphanum,
+ "alphaunicode": isAlphaUnicode,
+ "alphanumunicode": isAlphanumUnicode,
+ "numeric": isNumeric,
+ "number": isNumber,
+ "hexadecimal": isHexadecimal,
+ "hexcolor": isHEXColor,
+ "rgb": isRGB,
+ "rgba": isRGBA,
+ "hsl": isHSL,
+ "hsla": isHSLA,
+ "e164": isE164,
+ "email": isEmail,
+ "url": isURL,
+ "uri": isURI,
+ "urn_rfc2141": isUrnRFC2141, // RFC 2141
+ "file": isFile,
+ "base64": isBase64,
+ "base64url": isBase64URL,
+ "contains": contains,
+ "containsany": containsAny,
+ "containsrune": containsRune,
+ "excludes": excludes,
+ "excludesall": excludesAll,
+ "excludesrune": excludesRune,
+ "startswith": startsWith,
+ "endswith": endsWith,
+ "startsnotwith": startsNotWith,
+ "endsnotwith": endsNotWith,
+ "isbn": isISBN,
+ "isbn10": isISBN10,
+ "isbn13": isISBN13,
+ "eth_addr": isEthereumAddress,
+ "btc_addr": isBitcoinAddress,
+ "btc_addr_bech32": isBitcoinBech32Address,
+ "uuid": isUUID,
+ "uuid3": isUUID3,
+ "uuid4": isUUID4,
+ "uuid5": isUUID5,
+ "uuid_rfc4122": isUUIDRFC4122,
+ "uuid3_rfc4122": isUUID3RFC4122,
+ "uuid4_rfc4122": isUUID4RFC4122,
+ "uuid5_rfc4122": isUUID5RFC4122,
+ "ascii": isASCII,
+ "printascii": isPrintableASCII,
+ "multibyte": hasMultiByteCharacter,
+ "datauri": isDataURI,
+ "latitude": isLatitude,
+ "longitude": isLongitude,
+ "ssn": isSSN,
+ "ipv4": isIPv4,
+ "ipv6": isIPv6,
+ "ip": isIP,
+ "cidrv4": isCIDRv4,
+ "cidrv6": isCIDRv6,
+ "cidr": isCIDR,
+ "tcp4_addr": isTCP4AddrResolvable,
+ "tcp6_addr": isTCP6AddrResolvable,
+ "tcp_addr": isTCPAddrResolvable,
+ "udp4_addr": isUDP4AddrResolvable,
+ "udp6_addr": isUDP6AddrResolvable,
+ "udp_addr": isUDPAddrResolvable,
+ "ip4_addr": isIP4AddrResolvable,
+ "ip6_addr": isIP6AddrResolvable,
+ "ip_addr": isIPAddrResolvable,
+ "unix_addr": isUnixAddrResolvable,
+ "mac": isMAC,
+ "hostname": isHostnameRFC952, // RFC 952
+ "hostname_rfc1123": isHostnameRFC1123, // RFC 1123
+ "fqdn": isFQDN,
+ "unique": isUnique,
+ "oneof": isOneOf,
+ "html": isHTML,
+ "html_encoded": isHTMLEncoded,
+ "url_encoded": isURLEncoded,
+ "dir": isDir,
+ "json": isJSON,
+ "hostname_port": isHostnamePort,
+ "lowercase": isLowercase,
+ "uppercase": isUppercase,
+ "datetime": isDatetime,
+ "timezone": isTimeZone,
+ "iso3166_1_alpha2": isIso3166Alpha2,
+ "iso3166_1_alpha3": isIso3166Alpha3,
+ "iso3166_1_alpha_numeric": isIso3166AlphaNumeric,
+ }
+)
+
+var oneofValsCache = map[string][]string{}
+var oneofValsCacheRWLock = sync.RWMutex{}
+
+func parseOneOfParam2(s string) []string {
+ oneofValsCacheRWLock.RLock()
+ vals, ok := oneofValsCache[s]
+ oneofValsCacheRWLock.RUnlock()
+ if !ok {
+ oneofValsCacheRWLock.Lock()
+ vals = splitParamsRegex.FindAllString(s, -1)
+ for i := 0; i < len(vals); i++ {
+ vals[i] = strings.Replace(vals[i], "'", "", -1)
+ }
+ oneofValsCache[s] = vals
+ oneofValsCacheRWLock.Unlock()
+ }
+ return vals
+}
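+
+// Illustrative example: parseOneOfParam2("red green 'light blue'") returns
+// []string{"red", "green", "light blue"}; splitParamsRegex (defined alongside
+// the package's other regexes) splits on whitespace while keeping a
+// single-quoted run together, and the quotes are stripped above.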
+
+func isURLEncoded(fl FieldLevel) bool {
+ return uRLEncodedRegex.MatchString(fl.Field().String())
+}
+
+func isHTMLEncoded(fl FieldLevel) bool {
+ return hTMLEncodedRegex.MatchString(fl.Field().String())
+}
+
+func isHTML(fl FieldLevel) bool {
+ return hTMLRegex.MatchString(fl.Field().String())
+}
+
+func isOneOf(fl FieldLevel) bool {
+ vals := parseOneOfParam2(fl.Param())
+
+ field := fl.Field()
+
+ var v string
+ switch field.Kind() {
+ case reflect.String:
+ v = field.String()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ v = strconv.FormatInt(field.Int(), 10)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ v = strconv.FormatUint(field.Uint(), 10)
+ default:
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+ }
+ for i := 0; i < len(vals); i++ {
+ if vals[i] == v {
+ return true
+ }
+ }
+ return false
+}
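+
+// Illustrative tag usage (hypothetical struct, not part of this file):
+//
+//	type Order struct {
+//		Status string `validate:"oneof=pending shipped delivered"`
+//		Code   int    `validate:"oneof=1 2 3"`
+//	}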
+
+// isUnique is the validation function for validating if each array|slice|map value is unique
+func isUnique(fl FieldLevel) bool {
+
+ field := fl.Field()
+ param := fl.Param()
+ v := reflect.ValueOf(struct{}{})
+
+ switch field.Kind() {
+ case reflect.Slice, reflect.Array:
+ elem := field.Type().Elem()
+ if elem.Kind() == reflect.Ptr {
+ elem = elem.Elem()
+ }
+
+ if param == "" {
+ m := reflect.MakeMap(reflect.MapOf(elem, v.Type()))
+
+ for i := 0; i < field.Len(); i++ {
+ m.SetMapIndex(reflect.Indirect(field.Index(i)), v)
+ }
+ return field.Len() == m.Len()
+ }
+
+ sf, ok := elem.FieldByName(param)
+ if !ok {
+ panic(fmt.Sprintf("Bad field name %s", param))
+ }
+
+ sfTyp := sf.Type
+ if sfTyp.Kind() == reflect.Ptr {
+ sfTyp = sfTyp.Elem()
+ }
+
+ m := reflect.MakeMap(reflect.MapOf(sfTyp, v.Type()))
+ for i := 0; i < field.Len(); i++ {
+ m.SetMapIndex(reflect.Indirect(reflect.Indirect(field.Index(i)).FieldByName(param)), v)
+ }
+ return field.Len() == m.Len()
+ case reflect.Map:
+ m := reflect.MakeMap(reflect.MapOf(field.Type().Elem(), v.Type()))
+
+ for _, k := range field.MapKeys() {
+ m.SetMapIndex(field.MapIndex(k), v)
+ }
+ return field.Len() == m.Len()
+ default:
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+ }
+}
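+
+// Illustrative tag usage: `validate:"unique"` on a []string rejects duplicate
+// elements, while `validate:"unique=Name"` on a slice of structs (a
+// hypothetical []User) requires the Name field to be distinct across elements.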
+
+// IsMAC is the validation function for validating if the field's value is a valid MAC address.
+func isMAC(fl FieldLevel) bool {
+
+ _, err := net.ParseMAC(fl.Field().String())
+
+ return err == nil
+}
+
+// IsCIDRv4 is the validation function for validating if the field's value is a valid v4 CIDR address.
+func isCIDRv4(fl FieldLevel) bool {
+
+ ip, _, err := net.ParseCIDR(fl.Field().String())
+
+ return err == nil && ip.To4() != nil
+}
+
+// IsCIDRv6 is the validation function for validating if the field's value is a valid v6 CIDR address.
+func isCIDRv6(fl FieldLevel) bool {
+
+ ip, _, err := net.ParseCIDR(fl.Field().String())
+
+ return err == nil && ip.To4() == nil
+}
+
+// IsCIDR is the validation function for validating if the field's value is a valid v4 or v6 CIDR address.
+func isCIDR(fl FieldLevel) bool {
+
+ _, _, err := net.ParseCIDR(fl.Field().String())
+
+ return err == nil
+}
+
+// IsIPv4 is the validation function for validating if a value is a valid v4 IP address.
+func isIPv4(fl FieldLevel) bool {
+
+ ip := net.ParseIP(fl.Field().String())
+
+ return ip != nil && ip.To4() != nil
+}
+
+// IsIPv6 is the validation function for validating if the field's value is a valid v6 IP address.
+func isIPv6(fl FieldLevel) bool {
+
+ ip := net.ParseIP(fl.Field().String())
+
+ return ip != nil && ip.To4() == nil
+}
+
+// IsIP is the validation function for validating if the field's value is a valid v4 or v6 IP address.
+func isIP(fl FieldLevel) bool {
+
+ ip := net.ParseIP(fl.Field().String())
+
+ return ip != nil
+}
+
+// IsSSN is the validation function for validating if the field's value is a valid SSN.
+func isSSN(fl FieldLevel) bool {
+
+ field := fl.Field()
+
+ if field.Len() != 11 {
+ return false
+ }
+
+ return sSNRegex.MatchString(field.String())
+}
+
+// IsLongitude is the validation function for validating if the field's value is a valid longitude coordinate.
+func isLongitude(fl FieldLevel) bool {
+ field := fl.Field()
+
+ var v string
+ switch field.Kind() {
+ case reflect.String:
+ v = field.String()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ v = strconv.FormatInt(field.Int(), 10)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ v = strconv.FormatUint(field.Uint(), 10)
+ case reflect.Float32:
+ v = strconv.FormatFloat(field.Float(), 'f', -1, 32)
+ case reflect.Float64:
+ v = strconv.FormatFloat(field.Float(), 'f', -1, 64)
+ default:
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+ }
+
+ return longitudeRegex.MatchString(v)
+}
+
+// IsLatitude is the validation function for validating if the field's value is a valid latitude coordinate.
+func isLatitude(fl FieldLevel) bool {
+ field := fl.Field()
+
+ var v string
+ switch field.Kind() {
+ case reflect.String:
+ v = field.String()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ v = strconv.FormatInt(field.Int(), 10)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ v = strconv.FormatUint(field.Uint(), 10)
+ case reflect.Float32:
+ v = strconv.FormatFloat(field.Float(), 'f', -1, 32)
+ case reflect.Float64:
+ v = strconv.FormatFloat(field.Float(), 'f', -1, 64)
+ default:
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+ }
+
+ return latitudeRegex.MatchString(v)
+}
+
+// IsDataURI is the validation function for validating if the field's value is a valid data URI.
+func isDataURI(fl FieldLevel) bool {
+
+ uri := strings.SplitN(fl.Field().String(), ",", 2)
+
+ if len(uri) != 2 {
+ return false
+ }
+
+ if !dataURIRegex.MatchString(uri[0]) {
+ return false
+ }
+
+ return base64Regex.MatchString(uri[1])
+}
+
+// HasMultiByteCharacter is the validation function for validating if the field's value has a multi byte character.
+func hasMultiByteCharacter(fl FieldLevel) bool {
+
+ field := fl.Field()
+
+ if field.Len() == 0 {
+ return true
+ }
+
+ return multibyteRegex.MatchString(field.String())
+}
+
+// IsPrintableASCII is the validation function for validating if the field's value is a valid printable ASCII character.
+func isPrintableASCII(fl FieldLevel) bool {
+ return printableASCIIRegex.MatchString(fl.Field().String())
+}
+
+// IsASCII is the validation function for validating if the field's value is a valid ASCII character.
+func isASCII(fl FieldLevel) bool {
+ return aSCIIRegex.MatchString(fl.Field().String())
+}
+
+// IsUUID5 is the validation function for validating if the field's value is a valid v5 UUID.
+func isUUID5(fl FieldLevel) bool {
+ return uUID5Regex.MatchString(fl.Field().String())
+}
+
+// IsUUID4 is the validation function for validating if the field's value is a valid v4 UUID.
+func isUUID4(fl FieldLevel) bool {
+ return uUID4Regex.MatchString(fl.Field().String())
+}
+
+// IsUUID3 is the validation function for validating if the field's value is a valid v3 UUID.
+func isUUID3(fl FieldLevel) bool {
+ return uUID3Regex.MatchString(fl.Field().String())
+}
+
+// IsUUID is the validation function for validating if the field's value is a valid UUID of any version.
+func isUUID(fl FieldLevel) bool {
+ return uUIDRegex.MatchString(fl.Field().String())
+}
+
+// IsUUID5RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v5 UUID.
+func isUUID5RFC4122(fl FieldLevel) bool {
+ return uUID5RFC4122Regex.MatchString(fl.Field().String())
+}
+
+// IsUUID4RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v4 UUID.
+func isUUID4RFC4122(fl FieldLevel) bool {
+ return uUID4RFC4122Regex.MatchString(fl.Field().String())
+}
+
+// IsUUID3RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v3 UUID.
+func isUUID3RFC4122(fl FieldLevel) bool {
+ return uUID3RFC4122Regex.MatchString(fl.Field().String())
+}
+
+// IsUUIDRFC4122 is the validation function for validating if the field's value is a valid RFC4122 UUID of any version.
+func isUUIDRFC4122(fl FieldLevel) bool {
+ return uUIDRFC4122Regex.MatchString(fl.Field().String())
+}
+
+// IsISBN is the validation function for validating if the field's value is a valid v10 or v13 ISBN.
+func isISBN(fl FieldLevel) bool {
+ return isISBN10(fl) || isISBN13(fl)
+}
+
+// IsISBN13 is the validation function for validating if the field's value is a valid v13 ISBN.
+func isISBN13(fl FieldLevel) bool {
+
+ s := strings.Replace(strings.Replace(fl.Field().String(), "-", "", 4), " ", "", 4)
+
+ if !iSBN13Regex.MatchString(s) {
+ return false
+ }
+
+ var checksum int32
+ var i int32
+
+ factor := []int32{1, 3}
+
+ for i = 0; i < 12; i++ {
+ checksum += factor[i%2] * int32(s[i]-'0')
+ }
+
+ return (int32(s[12]-'0'))-((10-(checksum%10))%10) == 0
+}
+
+// IsISBN10 is the validation function for validating if the field's value is a valid v10 ISBN.
+func isISBN10(fl FieldLevel) bool {
+
+ s := strings.Replace(strings.Replace(fl.Field().String(), "-", "", 3), " ", "", 3)
+
+ if !iSBN10Regex.MatchString(s) {
+ return false
+ }
+
+ var checksum int32
+ var i int32
+
+ for i = 0; i < 9; i++ {
+ checksum += (i + 1) * int32(s[i]-'0')
+ }
+
+ if s[9] == 'X' {
+ checksum += 10 * 10
+ } else {
+ checksum += 10 * int32(s[9]-'0')
+ }
+
+ return checksum%11 == 0
+}
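+
+// Worked example: for "0-306-40615-2" the weighted sum is
+// 1*0 + 2*3 + 3*0 + 4*6 + 5*4 + 6*0 + 7*6 + 8*1 + 9*5 + 10*2 = 165,
+// and 165 % 11 == 0, so the ISBN is accepted.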
+
+// IsEthereumAddress is the validation function for validating if the field's value is a valid Ethereum address.
+func isEthereumAddress(fl FieldLevel) bool {
+ address := fl.Field().String()
+
+ if !ethAddressRegex.MatchString(address) {
+ return false
+ }
+
+ if ethaddressRegexUpper.MatchString(address) || ethAddressRegexLower.MatchString(address) {
+ return true
+ }
+
+ // Checksum validation. Reference: https://github.com/ethereum/EIPs/blob/master/EIPS/eip-55.md
+ address = address[2:] // Skip "0x" prefix.
+ h := sha3.NewLegacyKeccak256()
+ // hash.Hash's io.Writer implementation says it never returns an error. https://golang.org/pkg/hash/#Hash
+ _, _ = h.Write([]byte(strings.ToLower(address)))
+ hash := hex.EncodeToString(h.Sum(nil))
+
+ for i := 0; i < len(address); i++ {
+ if address[i] <= '9' { // Skip 0-9 digits: they don't have upper/lower-case.
+ continue
+ }
+ if hash[i] > '7' && address[i] >= 'a' || hash[i] <= '7' && address[i] <= 'F' {
+ return false
+ }
+ }
+
+ return true
+}
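+
+// Illustrative example: the EIP-55 test vector
+// "0x5aAeb6053F3E94C9b9A09f33669435E7Ef1BeAed" passes the checksum loop, the
+// same address spelled all-lowercase or all-uppercase is accepted by the early
+// return above, and flipping the case of any single letter is rejected.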
+
+// IsBitcoinAddress is the validation function for validating if the field's value is a valid btc address
+func isBitcoinAddress(fl FieldLevel) bool {
+ address := fl.Field().String()
+
+ if !btcAddressRegex.MatchString(address) {
+ return false
+ }
+
+ alphabet := []byte("123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz")
+
+ decode := [25]byte{}
+
+ for _, n := range []byte(address) {
+ d := bytes.IndexByte(alphabet, n)
+
+ for i := 24; i >= 0; i-- {
+ d += 58 * int(decode[i])
+ decode[i] = byte(d % 256)
+ d /= 256
+ }
+ }
+
+ h := sha256.New()
+ _, _ = h.Write(decode[:21])
+ d := h.Sum([]byte{})
+ h = sha256.New()
+ _, _ = h.Write(d)
+
+ validchecksum := [4]byte{}
+ computedchecksum := [4]byte{}
+
+ copy(computedchecksum[:], h.Sum(d[:0]))
+ copy(validchecksum[:], decode[21:])
+
+ return validchecksum == computedchecksum
+}
+
+// IsBitcoinBech32Address is the validation function for validating if the field's value is a valid bech32 btc address
+func isBitcoinBech32Address(fl FieldLevel) bool {
+ address := fl.Field().String()
+
+ if !btcLowerAddressRegexBech32.MatchString(address) && !btcUpperAddressRegexBech32.MatchString(address) {
+ return false
+ }
+
+ am := len(address) % 8
+
+ if am == 0 || am == 3 || am == 5 {
+ return false
+ }
+
+ address = strings.ToLower(address)
+
+ alphabet := "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
+
+ hr := []int{3, 3, 0, 2, 3} // the expanded human-readable part, which is always "bc" here
+ addr := address[3:]
+ dp := make([]int, 0, len(addr))
+
+ for _, c := range addr {
+ dp = append(dp, strings.IndexRune(alphabet, c))
+ }
+
+ ver := dp[0]
+
+ if ver < 0 || ver > 16 {
+ return false
+ }
+
+ if ver == 0 {
+ if len(address) != 42 && len(address) != 62 {
+ return false
+ }
+ }
+
+ values := append(hr, dp...)
+
+ GEN := []int{0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3}
+
+ p := 1
+
+ for _, v := range values {
+ b := p >> 25
+ p = (p&0x1ffffff)<<5 ^ v
+
+ for i := 0; i < 5; i++ {
+ if (b>>uint(i))&1 == 1 {
+ p ^= GEN[i]
+ }
+ }
+ }
+
+ if p != 1 {
+ return false
+ }
+
+ b := uint(0)
+ acc := 0
+ mv := (1 << 5) - 1
+ var sw []int
+
+ for _, v := range dp[1 : len(dp)-6] {
+ acc = (acc << 5) | v
+ b += 5
+ for b >= 8 {
+ b -= 8
+ sw = append(sw, (acc>>b)&mv)
+ }
+ }
+
+ if len(sw) < 2 || len(sw) > 40 {
+ return false
+ }
+
+ return true
+}
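+
+// Illustrative example: the BIP-173 test vector
+// "bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t4" (witness version 0, 42
+// characters) passes, while changing any single character breaks the polymod
+// checksum computed above.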
+
+// ExcludesRune is the validation function for validating that the field's value does not contain the rune specified within the param.
+func excludesRune(fl FieldLevel) bool {
+ return !containsRune(fl)
+}
+
+// ExcludesAll is the validation function for validating that the field's value does not contain any of the characters specified within the param.
+func excludesAll(fl FieldLevel) bool {
+ return !containsAny(fl)
+}
+
+// Excludes is the validation function for validating that the field's value does not contain the text specified within the param.
+func excludes(fl FieldLevel) bool {
+ return !contains(fl)
+}
+
+// ContainsRune is the validation function for validating that the field's value contains the rune specified within the param.
+func containsRune(fl FieldLevel) bool {
+
+ r, _ := utf8.DecodeRuneInString(fl.Param())
+
+ return strings.ContainsRune(fl.Field().String(), r)
+}
+
+// ContainsAny is the validation function for validating that the field's value contains any of the characters specified within the param.
+func containsAny(fl FieldLevel) bool {
+ return strings.ContainsAny(fl.Field().String(), fl.Param())
+}
+
+// Contains is the validation function for validating that the field's value contains the text specified within the param.
+func contains(fl FieldLevel) bool {
+ return strings.Contains(fl.Field().String(), fl.Param())
+}
+
+// StartsWith is the validation function for validating that the field's value starts with the text specified within the param.
+func startsWith(fl FieldLevel) bool {
+ return strings.HasPrefix(fl.Field().String(), fl.Param())
+}
+
+// EndsWith is the validation function for validating that the field's value ends with the text specified within the param.
+func endsWith(fl FieldLevel) bool {
+ return strings.HasSuffix(fl.Field().String(), fl.Param())
+}
+
+// StartsNotWith is the validation function for validating that the field's value does not start with the text specified within the param.
+func startsNotWith(fl FieldLevel) bool {
+ return !startsWith(fl)
+}
+
+// EndsNotWith is the validation function for validating that the field's value does not end with the text specified within the param.
+func endsNotWith(fl FieldLevel) bool {
+ return !endsWith(fl)
+}
+
+// FieldContains is the validation function for validating if the current field's value contains the field specified by the param's value.
+func fieldContains(fl FieldLevel) bool {
+ field := fl.Field()
+
+ currentField, _, ok := fl.GetStructFieldOK()
+
+ if !ok {
+ return false
+ }
+
+ return strings.Contains(field.String(), currentField.String())
+}
+
+// FieldExcludes is the validation function for validating if the current field's value excludes the field specified by the param's value.
+func fieldExcludes(fl FieldLevel) bool {
+ field := fl.Field()
+
+ currentField, _, ok := fl.GetStructFieldOK()
+ if !ok {
+ return true
+ }
+
+ return !strings.Contains(field.String(), currentField.String())
+}
+
+// IsNeField is the validation function for validating if the current field's value is not equal to the field specified by the param's value.
+func isNeField(fl FieldLevel) bool {
+
+ field := fl.Field()
+ kind := field.Kind()
+
+ currentField, currentKind, ok := fl.GetStructFieldOK()
+
+ if !ok || currentKind != kind {
+ return true
+ }
+
+ switch kind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return field.Int() != currentField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return field.Uint() != currentField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+ return field.Float() != currentField.Float()
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ return int64(field.Len()) != int64(currentField.Len())
+
+ case reflect.Struct:
+
+ fieldType := field.Type()
+
+ // Not the same underlying type, i.e. struct and time
+ if fieldType != currentField.Type() {
+ return true
+ }
+
+ if fieldType == timeType {
+
+ t := currentField.Interface().(time.Time)
+ fieldTime := field.Interface().(time.Time)
+
+ return !fieldTime.Equal(t)
+ }
+
+ }
+
+ // default reflect.String:
+ return field.String() != currentField.String()
+}
+
+// IsNe is the validation function for validating that the field's value does not equal the provided param value.
+func isNe(fl FieldLevel) bool {
+ return !isEq(fl)
+}
+
+// IsLteCrossStructField is the validation function for validating if the current field's value is less than or equal to the field, within a separate struct, specified by the param's value.
+func isLteCrossStructField(fl FieldLevel) bool {
+
+ field := fl.Field()
+ kind := field.Kind()
+
+ topField, topKind, ok := fl.GetStructFieldOK()
+ if !ok || topKind != kind {
+ return false
+ }
+
+ switch kind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return field.Int() <= topField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return field.Uint() <= topField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+ return field.Float() <= topField.Float()
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ return int64(field.Len()) <= int64(topField.Len())
+
+ case reflect.Struct:
+
+ fieldType := field.Type()
+
+ // Not the same underlying type, i.e. struct and time
+ if fieldType != topField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ fieldTime := field.Interface().(time.Time)
+ topTime := topField.Interface().(time.Time)
+
+ return fieldTime.Before(topTime) || fieldTime.Equal(topTime)
+ }
+ }
+
+ // default reflect.String:
+ return field.String() <= topField.String()
+}
+
+// IsLtCrossStructField is the validation function for validating if the current field's value is less than the field, within a separate struct, specified by the param's value.
+func isLtCrossStructField(fl FieldLevel) bool {
+
+ field := fl.Field()
+ kind := field.Kind()
+
+ topField, topKind, ok := fl.GetStructFieldOK()
+ if !ok || topKind != kind {
+ return false
+ }
+
+ switch kind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return field.Int() < topField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return field.Uint() < topField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+ return field.Float() < topField.Float()
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ return int64(field.Len()) < int64(topField.Len())
+
+ case reflect.Struct:
+
+ fieldType := field.Type()
+
+ // Not the same underlying type, i.e. struct and time
+ if fieldType != topField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ fieldTime := field.Interface().(time.Time)
+ topTime := topField.Interface().(time.Time)
+
+ return fieldTime.Before(topTime)
+ }
+ }
+
+ // default reflect.String:
+ return field.String() < topField.String()
+}
+
+// IsGteCrossStructField is the validation function for validating if the current field's value is greater than or equal to the field, within a separate struct, specified by the param's value.
+func isGteCrossStructField(fl FieldLevel) bool {
+
+ field := fl.Field()
+ kind := field.Kind()
+
+ topField, topKind, ok := fl.GetStructFieldOK()
+ if !ok || topKind != kind {
+ return false
+ }
+
+ switch kind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return field.Int() >= topField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return field.Uint() >= topField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+ return field.Float() >= topField.Float()
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ return int64(field.Len()) >= int64(topField.Len())
+
+ case reflect.Struct:
+
+ fieldType := field.Type()
+
+ // Not the same underlying type, i.e. struct and time
+ if fieldType != topField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ fieldTime := field.Interface().(time.Time)
+ topTime := topField.Interface().(time.Time)
+
+ return fieldTime.After(topTime) || fieldTime.Equal(topTime)
+ }
+ }
+
+ // default reflect.String:
+ return field.String() >= topField.String()
+}
+
+// IsGtCrossStructField is the validation function for validating if the current field's value is greater than the field, within a separate struct, specified by the param's value.
+func isGtCrossStructField(fl FieldLevel) bool {
+
+ field := fl.Field()
+ kind := field.Kind()
+
+ topField, topKind, ok := fl.GetStructFieldOK()
+ if !ok || topKind != kind {
+ return false
+ }
+
+ switch kind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return field.Int() > topField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return field.Uint() > topField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+ return field.Float() > topField.Float()
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ return int64(field.Len()) > int64(topField.Len())
+
+ case reflect.Struct:
+
+ fieldType := field.Type()
+
+ // Not the same underlying type, i.e. struct and time
+ if fieldType != topField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ fieldTime := field.Interface().(time.Time)
+ topTime := topField.Interface().(time.Time)
+
+ return fieldTime.After(topTime)
+ }
+ }
+
+ // default reflect.String:
+ return field.String() > topField.String()
+}
+
+// IsNeCrossStructField is the validation function for validating that the current field's value is not equal to the field, within a separate struct, specified by the param's value.
+func isNeCrossStructField(fl FieldLevel) bool {
+
+ field := fl.Field()
+ kind := field.Kind()
+
+ topField, currentKind, ok := fl.GetStructFieldOK()
+ if !ok || currentKind != kind {
+ return true
+ }
+
+ switch kind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return topField.Int() != field.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return topField.Uint() != field.Uint()
+
+ case reflect.Float32, reflect.Float64:
+ return topField.Float() != field.Float()
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ return int64(topField.Len()) != int64(field.Len())
+
+ case reflect.Struct:
+
+ fieldType := field.Type()
+
+ // Not the same underlying type, i.e. struct and time
+ if fieldType != topField.Type() {
+ return true
+ }
+
+ if fieldType == timeType {
+
+ t := field.Interface().(time.Time)
+ fieldTime := topField.Interface().(time.Time)
+
+ return !fieldTime.Equal(t)
+ }
+ }
+
+ // default reflect.String:
+ return topField.String() != field.String()
+}
+
+// IsEqCrossStructField is the validation function for validating that the current field's value is equal to the field, within a separate struct, specified by the param's value.
+func isEqCrossStructField(fl FieldLevel) bool {
+
+ field := fl.Field()
+ kind := field.Kind()
+
+ topField, topKind, ok := fl.GetStructFieldOK()
+ if !ok || topKind != kind {
+ return false
+ }
+
+ switch kind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return topField.Int() == field.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return topField.Uint() == field.Uint()
+
+ case reflect.Float32, reflect.Float64:
+ return topField.Float() == field.Float()
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ return int64(topField.Len()) == int64(field.Len())
+
+ case reflect.Struct:
+
+ fieldType := field.Type()
+
+ // Not the same underlying type, i.e. struct and time
+ if fieldType != topField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ t := field.Interface().(time.Time)
+ fieldTime := topField.Interface().(time.Time)
+
+ return fieldTime.Equal(t)
+ }
+ }
+
+ // default reflect.String:
+ return topField.String() == field.String()
+}
+
+// IsEqField is the validation function for validating if the current field's value is equal to the field specified by the param's value.
+func isEqField(fl FieldLevel) bool {
+
+ field := fl.Field()
+ kind := field.Kind()
+
+ currentField, currentKind, ok := fl.GetStructFieldOK()
+ if !ok || currentKind != kind {
+ return false
+ }
+
+ switch kind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return field.Int() == currentField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return field.Uint() == currentField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+ return field.Float() == currentField.Float()
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ return int64(field.Len()) == int64(currentField.Len())
+
+ case reflect.Struct:
+
+ fieldType := field.Type()
+
+ // Not the same underlying type, i.e. struct and time
+ if fieldType != currentField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ t := currentField.Interface().(time.Time)
+ fieldTime := field.Interface().(time.Time)
+
+ return fieldTime.Equal(t)
+ }
+
+ }
+
+ // default reflect.String:
+ return field.String() == currentField.String()
+}
+
+// IsEq is the validation function for validating if the current field's value is equal to the param's value.
+func isEq(fl FieldLevel) bool {
+
+ field := fl.Field()
+ param := fl.Param()
+
+ switch field.Kind() {
+
+ case reflect.String:
+ return field.String() == param
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ p := asInt(param)
+
+ return int64(field.Len()) == p
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ p := asIntFromType(field.Type(), param)
+
+ return field.Int() == p
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ p := asUint(param)
+
+ return field.Uint() == p
+
+ case reflect.Float32, reflect.Float64:
+ p := asFloat(param)
+
+ return field.Float() == p
+
+ case reflect.Bool:
+ p := asBool(param)
+
+ return field.Bool() == p
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
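+
+// Illustrative tag usage: `validate:"eq=5"` requires the value 5 on an int
+// field, a length of 5 on a slice, map or array, and the literal string "5"
+// on a string field, mirroring the kind switch above.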
+
+// IsBase64 is the validation function for validating if the current field's value is a valid base 64.
+func isBase64(fl FieldLevel) bool {
+ return base64Regex.MatchString(fl.Field().String())
+}
+
+// IsBase64URL is the validation function for validating if the current field's value is a valid base64 URL safe string.
+func isBase64URL(fl FieldLevel) bool {
+ return base64URLRegex.MatchString(fl.Field().String())
+}
+
+// IsURI is the validation function for validating if the current field's value is a valid URI.
+func isURI(fl FieldLevel) bool {
+
+ field := fl.Field()
+
+ switch field.Kind() {
+
+ case reflect.String:
+
+ s := field.String()
+
+ // checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195
+ // emulate browser and strip the '#' suffix prior to validation. see issue-#237
+ if i := strings.Index(s, "#"); i > -1 {
+ s = s[:i]
+ }
+
+ if len(s) == 0 {
+ return false
+ }
+
+ _, err := url.ParseRequestURI(s)
+
+ return err == nil
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// IsURL is the validation function for validating if the current field's value is a valid URL.
+func isURL(fl FieldLevel) bool {
+
+ field := fl.Field()
+
+ switch field.Kind() {
+
+ case reflect.String:
+
+ var i int
+ s := field.String()
+
+ // checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195
+ // emulate browser and strip the '#' suffix prior to validation. see issue-#237
+ if i = strings.Index(s, "#"); i > -1 {
+ s = s[:i]
+ }
+
+ if len(s) == 0 {
+ return false
+ }
+
+ url, err := url.ParseRequestURI(s)
+
+ if err != nil || url.Scheme == "" {
+ return false
+ }
+
+ return true
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// isUrnRFC2141 is the validation function for validating if the current field's value is a valid URN as per RFC 2141.
+func isUrnRFC2141(fl FieldLevel) bool {
+ field := fl.Field()
+
+ switch field.Kind() {
+
+ case reflect.String:
+
+ str := field.String()
+
+ _, match := urn.Parse([]byte(str))
+
+ return match
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// IsFile is the validation function for validating if the current field's value is a valid file path.
+func isFile(fl FieldLevel) bool {
+ field := fl.Field()
+
+ switch field.Kind() {
+ case reflect.String:
+ fileInfo, err := os.Stat(field.String())
+ if err != nil {
+ return false
+ }
+
+ return !fileInfo.IsDir()
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// IsE164 is the validation function for validating if the current field's value is a valid e.164 formatted phone number.
+func isE164(fl FieldLevel) bool {
+ return e164Regex.MatchString(fl.Field().String())
+}
+
+// IsEmail is the validation function for validating if the current field's value is a valid email address.
+func isEmail(fl FieldLevel) bool {
+ return emailRegex.MatchString(fl.Field().String())
+}
+
+// IsHSLA is the validation function for validating if the current field's value is a valid HSLA color.
+func isHSLA(fl FieldLevel) bool {
+ return hslaRegex.MatchString(fl.Field().String())
+}
+
+// IsHSL is the validation function for validating if the current field's value is a valid HSL color.
+func isHSL(fl FieldLevel) bool {
+ return hslRegex.MatchString(fl.Field().String())
+}
+
+// IsRGBA is the validation function for validating if the current field's value is a valid RGBA color.
+func isRGBA(fl FieldLevel) bool {
+ return rgbaRegex.MatchString(fl.Field().String())
+}
+
+// IsRGB is the validation function for validating if the current field's value is a valid RGB color.
+func isRGB(fl FieldLevel) bool {
+ return rgbRegex.MatchString(fl.Field().String())
+}
+
+// IsHEXColor is the validation function for validating if the current field's value is a valid HEX color.
+func isHEXColor(fl FieldLevel) bool {
+ return hexcolorRegex.MatchString(fl.Field().String())
+}
+
+// IsHexadecimal is the validation function for validating if the current field's value is a valid hexadecimal.
+func isHexadecimal(fl FieldLevel) bool {
+ return hexadecimalRegex.MatchString(fl.Field().String())
+}
+
+// IsNumber is the validation function for validating if the current field's value is a valid number.
+func isNumber(fl FieldLevel) bool {
+ switch fl.Field().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64:
+ return true
+ default:
+ return numberRegex.MatchString(fl.Field().String())
+ }
+}
+
+// IsNumeric is the validation function for validating if the current field's value is a valid numeric value.
+func isNumeric(fl FieldLevel) bool {
+ switch fl.Field().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64:
+ return true
+ default:
+ return numericRegex.MatchString(fl.Field().String())
+ }
+}
+
+// IsAlphanum is the validation function for validating if the current field's value is a valid alphanumeric value.
+func isAlphanum(fl FieldLevel) bool {
+ return alphaNumericRegex.MatchString(fl.Field().String())
+}
+
+// IsAlpha is the validation function for validating if the current field's value is a valid alpha value.
+func isAlpha(fl FieldLevel) bool {
+ return alphaRegex.MatchString(fl.Field().String())
+}
+
+// IsAlphanumUnicode is the validation function for validating if the current field's value is a valid alphanumeric unicode value.
+func isAlphanumUnicode(fl FieldLevel) bool {
+ return alphaUnicodeNumericRegex.MatchString(fl.Field().String())
+}
+
+// IsAlphaUnicode is the validation function for validating if the current field's value is a valid alpha unicode value.
+func isAlphaUnicode(fl FieldLevel) bool {
+ return alphaUnicodeRegex.MatchString(fl.Field().String())
+}
+
+// isDefault is the opposite of required (aka hasValue)
+func isDefault(fl FieldLevel) bool {
+ return !hasValue(fl)
+}
+
+// HasValue is the validation function for validating if the current field's value is not the default static value.
+func hasValue(fl FieldLevel) bool {
+ field := fl.Field()
+ switch field.Kind() {
+ case reflect.Slice, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func:
+ return !field.IsNil()
+ default:
+ if fl.(*validate).fldIsPointer && field.Interface() != nil {
+ return true
+ }
+ return field.IsValid() && field.Interface() != reflect.Zero(field.Type()).Interface()
+ }
+}
+
+// requireCheckFieldKind checks, by kind, whether the field named by param (or the current field when param is empty) is nil or the zero value; defaultNotFoundValue is returned when the field cannot be found.
+func requireCheckFieldKind(fl FieldLevel, param string, defaultNotFoundValue bool) bool {
+ field := fl.Field()
+ kind := field.Kind()
+ var nullable, found bool
+ if len(param) > 0 {
+ field, kind, nullable, found = fl.GetStructFieldOKAdvanced2(fl.Parent(), param)
+ if !found {
+ return defaultNotFoundValue
+ }
+ }
+ switch kind {
+ case reflect.Invalid:
+ return defaultNotFoundValue
+ case reflect.Slice, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func:
+ return field.IsNil()
+ default:
+ if nullable && field.Interface() != nil {
+ return false
+ }
+ return field.IsValid() && field.Interface() == reflect.Zero(field.Type()).Interface()
+ }
+}
+
+// requireCheckFieldValue checks whether the field named by param equals the given value; defaultNotFoundValue is returned when the field cannot be found.
+func requireCheckFieldValue(fl FieldLevel, param string, value string, defaultNotFoundValue bool) bool {
+ field, kind, _, found := fl.GetStructFieldOKAdvanced2(fl.Parent(), param)
+ if !found {
+ return defaultNotFoundValue
+ }
+
+ switch kind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return field.Int() == asInt(value)
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return field.Uint() == asUint(value)
+
+ case reflect.Float32, reflect.Float64:
+ return field.Float() == asFloat(value)
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ return int64(field.Len()) == asInt(value)
+ }
+
+ // default reflect.String:
+ return field.String() == value
+}
+
+// requiredIf is the validation function
+// The field under validation must be present and not empty only if all of the other specified fields are equal to the value following the specified field.
+func requiredIf(fl FieldLevel) bool {
+ params := parseOneOfParam2(fl.Param())
+ if len(params)%2 != 0 {
+ panic(fmt.Sprintf("Bad param number for required_if %s", fl.FieldName()))
+ }
+ for i := 0; i < len(params); i += 2 {
+ if !requireCheckFieldValue(fl, params[i], params[i+1], false) {
+ return true
+ }
+ }
+ return hasValue(fl)
+}
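+
+// Illustrative tag usage (hypothetical field names): with
+// `validate:"required_if=Shipping express Region EU"` the field is required
+// only when Shipping == "express" and Region == "EU"; the params are consumed
+// in field/value pairs.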
+
+// requiredUnless is the validation function
+// The field under validation must be present and not empty unless any of the other specified fields are equal to the value following the specified field.
+func requiredUnless(fl FieldLevel) bool {
+ params := parseOneOfParam2(fl.Param())
+ if len(params)%2 != 0 {
+ panic(fmt.Sprintf("Bad param number for required_unless %s", fl.FieldName()))
+ }
+
+ for i := 0; i < len(params); i += 2 {
+ if requireCheckFieldValue(fl, params[i], params[i+1], false) {
+ return true
+ }
+ }
+ return hasValue(fl)
+}
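+
+// Illustrative tag usage (hypothetical field name): with
+// `validate:"required_unless=Role admin"` the field is required unless
+// Role == "admin"; any matching field/value pair waives the requirement.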
+
+// ExcludedWith is the validation function
+// The field under validation must not be present or must be empty if any of the other specified fields are present.
+func excludedWith(fl FieldLevel) bool {
+ params := parseOneOfParam2(fl.Param())
+ for _, param := range params {
+ if !requireCheckFieldKind(fl, param, true) {
+ return !hasValue(fl)
+ }
+ }
+ return true
+}
+
+// RequiredWith is the validation function
+// The field under validation must be present and not empty only if any of the other specified fields are present.
+func requiredWith(fl FieldLevel) bool {
+ params := parseOneOfParam2(fl.Param())
+ for _, param := range params {
+ if !requireCheckFieldKind(fl, param, true) {
+ return hasValue(fl)
+ }
+ }
+ return true
+}
+
+// ExcludedWithAll is the validation function
+// The field under validation must not be present or must be empty if all of the other specified fields are present.
+func excludedWithAll(fl FieldLevel) bool {
+ params := parseOneOfParam2(fl.Param())
+ for _, param := range params {
+ if requireCheckFieldKind(fl, param, true) {
+ return true
+ }
+ }
+ return !hasValue(fl)
+}
+
+// RequiredWithAll is the validation function
+// The field under validation must be present and not empty only if all of the other specified fields are present.
+func requiredWithAll(fl FieldLevel) bool {
+ params := parseOneOfParam2(fl.Param())
+ for _, param := range params {
+ if requireCheckFieldKind(fl, param, true) {
+ return true
+ }
+ }
+ return hasValue(fl)
+}
+
+// ExcludedWithout is the validation function
+// The field under validation must not be present or must be empty when any of the other specified fields are not present.
+func excludedWithout(fl FieldLevel) bool {
+ if requireCheckFieldKind(fl, strings.TrimSpace(fl.Param()), true) {
+ return !hasValue(fl)
+ }
+ return true
+}
+
+// RequiredWithout is the validation function
+// The field under validation must be present and not empty only when any of the other specified fields are not present.
+func requiredWithout(fl FieldLevel) bool {
+ if requireCheckFieldKind(fl, strings.TrimSpace(fl.Param()), true) {
+ return hasValue(fl)
+ }
+ return true
+}
+
+// ExcludedWithoutAll is the validation function
+// The field under validation must not be present or must be empty when all of the other specified fields are not present.
+func excludedWithoutAll(fl FieldLevel) bool {
+ params := parseOneOfParam2(fl.Param())
+ for _, param := range params {
+ if !requireCheckFieldKind(fl, param, true) {
+ return true
+ }
+ }
+ return !hasValue(fl)
+}
+
+// RequiredWithoutAll is the validation function
+// The field under validation must be present and not empty only when all of the other specified fields are not present.
+func requiredWithoutAll(fl FieldLevel) bool {
+ params := parseOneOfParam2(fl.Param())
+ for _, param := range params {
+ if !requireCheckFieldKind(fl, param, true) {
+ return true
+ }
+ }
+ return hasValue(fl)
+}
+
+// IsGteField is the validation function for validating if the current field's value is greater than or equal to the field specified by the param's value.
+func isGteField(fl FieldLevel) bool {
+
+ field := fl.Field()
+ kind := field.Kind()
+
+ currentField, currentKind, ok := fl.GetStructFieldOK()
+ if !ok || currentKind != kind {
+ return false
+ }
+
+ switch kind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+
+ return field.Int() >= currentField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+
+ return field.Uint() >= currentField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+
+ return field.Float() >= currentField.Float()
+
+ case reflect.Struct:
+
+ fieldType := field.Type()
+
+ // Not the same underlying type, i.e. struct and time
+ if fieldType != currentField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ t := currentField.Interface().(time.Time)
+ fieldTime := field.Interface().(time.Time)
+
+ return fieldTime.After(t) || fieldTime.Equal(t)
+ }
+ }
+
+ // default reflect.String
+ return len(field.String()) >= len(currentField.String())
+}
+
+// IsGtField is the validation function for validating if the current field's value is greater than the field specified by the param's value.
+func isGtField(fl FieldLevel) bool {
+
+ field := fl.Field()
+ kind := field.Kind()
+
+ currentField, currentKind, ok := fl.GetStructFieldOK()
+ if !ok || currentKind != kind {
+ return false
+ }
+
+ switch kind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+
+ return field.Int() > currentField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+
+ return field.Uint() > currentField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+
+ return field.Float() > currentField.Float()
+
+ case reflect.Struct:
+
+ fieldType := field.Type()
+
+ // Not the same underlying type, i.e. struct and time
+ if fieldType != currentField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ t := currentField.Interface().(time.Time)
+ fieldTime := field.Interface().(time.Time)
+
+ return fieldTime.After(t)
+ }
+ }
+
+ // default reflect.String
+ return len(field.String()) > len(currentField.String())
+}
+
+// IsGte is the validation function for validating if the current field's value is greater than or equal to the param's value.
+func isGte(fl FieldLevel) bool {
+
+ field := fl.Field()
+ param := fl.Param()
+
+ switch field.Kind() {
+
+ case reflect.String:
+ p := asInt(param)
+
+ return int64(utf8.RuneCountInString(field.String())) >= p
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ p := asInt(param)
+
+ return int64(field.Len()) >= p
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ p := asIntFromType(field.Type(), param)
+
+ return field.Int() >= p
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ p := asUint(param)
+
+ return field.Uint() >= p
+
+ case reflect.Float32, reflect.Float64:
+ p := asFloat(param)
+
+ return field.Float() >= p
+
+ case reflect.Struct:
+
+ if field.Type() == timeType {
+
+ now := time.Now().UTC()
+ t := field.Interface().(time.Time)
+
+ return t.After(now) || t.Equal(now)
+ }
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// IsGt is the validation function for validating if the current field's value is greater than the param's value.
+func isGt(fl FieldLevel) bool {
+
+ field := fl.Field()
+ param := fl.Param()
+
+ switch field.Kind() {
+
+ case reflect.String:
+ p := asInt(param)
+
+ return int64(utf8.RuneCountInString(field.String())) > p
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ p := asInt(param)
+
+ return int64(field.Len()) > p
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ p := asIntFromType(field.Type(), param)
+
+ return field.Int() > p
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ p := asUint(param)
+
+ return field.Uint() > p
+
+ case reflect.Float32, reflect.Float64:
+ p := asFloat(param)
+
+ return field.Float() > p
+ case reflect.Struct:
+
+ if field.Type() == timeType {
+
+ return field.Interface().(time.Time).After(time.Now().UTC())
+ }
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// HasLengthOf is the validation function for validating if the current field's value is equal to the param's value.
+func hasLengthOf(fl FieldLevel) bool {
+
+ field := fl.Field()
+ param := fl.Param()
+
+ switch field.Kind() {
+
+ case reflect.String:
+ p := asInt(param)
+
+ return int64(utf8.RuneCountInString(field.String())) == p
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ p := asInt(param)
+
+ return int64(field.Len()) == p
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ p := asIntFromType(field.Type(), param)
+
+ return field.Int() == p
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ p := asUint(param)
+
+ return field.Uint() == p
+
+ case reflect.Float32, reflect.Float64:
+ p := asFloat(param)
+
+ return field.Float() == p
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// HasMinOf is the validation function for validating if the current field's value is greater than or equal to the param's value.
+func hasMinOf(fl FieldLevel) bool {
+ return isGte(fl)
+}
+
+// IsLteField is the validation function for validating if the current field's value is less than or equal to the field specified by the param's value.
+func isLteField(fl FieldLevel) bool {
+
+ field := fl.Field()
+ kind := field.Kind()
+
+ currentField, currentKind, ok := fl.GetStructFieldOK()
+ if !ok || currentKind != kind {
+ return false
+ }
+
+ switch kind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+
+ return field.Int() <= currentField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+
+ return field.Uint() <= currentField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+
+ return field.Float() <= currentField.Float()
+
+ case reflect.Struct:
+
+ fieldType := field.Type()
+
+ // Not the same underlying type, i.e. struct and time
+ if fieldType != currentField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ t := currentField.Interface().(time.Time)
+ fieldTime := field.Interface().(time.Time)
+
+ return fieldTime.Before(t) || fieldTime.Equal(t)
+ }
+ }
+
+ // default reflect.String
+ return len(field.String()) <= len(currentField.String())
+}
+
+// IsLtField is the validation function for validating if the current field's value is less than the field specified by the param's value.
+func isLtField(fl FieldLevel) bool {
+
+ field := fl.Field()
+ kind := field.Kind()
+
+ currentField, currentKind, ok := fl.GetStructFieldOK()
+ if !ok || currentKind != kind {
+ return false
+ }
+
+ switch kind {
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+
+ return field.Int() < currentField.Int()
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+
+ return field.Uint() < currentField.Uint()
+
+ case reflect.Float32, reflect.Float64:
+
+ return field.Float() < currentField.Float()
+
+ case reflect.Struct:
+
+ fieldType := field.Type()
+
+ // Not the same underlying type, i.e. struct and time
+ if fieldType != currentField.Type() {
+ return false
+ }
+
+ if fieldType == timeType {
+
+ t := currentField.Interface().(time.Time)
+ fieldTime := field.Interface().(time.Time)
+
+ return fieldTime.Before(t)
+ }
+ }
+
+ // default reflect.String
+ return len(field.String()) < len(currentField.String())
+}
+
+// IsLte is the validation function for validating if the current field's value is less than or equal to the param's value.
+func isLte(fl FieldLevel) bool {
+
+ field := fl.Field()
+ param := fl.Param()
+
+ switch field.Kind() {
+
+ case reflect.String:
+ p := asInt(param)
+
+ return int64(utf8.RuneCountInString(field.String())) <= p
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ p := asInt(param)
+
+ return int64(field.Len()) <= p
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ p := asIntFromType(field.Type(), param)
+
+ return field.Int() <= p
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ p := asUint(param)
+
+ return field.Uint() <= p
+
+ case reflect.Float32, reflect.Float64:
+ p := asFloat(param)
+
+ return field.Float() <= p
+
+ case reflect.Struct:
+
+ if field.Type() == timeType {
+
+ now := time.Now().UTC()
+ t := field.Interface().(time.Time)
+
+ return t.Before(now) || t.Equal(now)
+ }
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// IsLt is the validation function for validating if the current field's value is less than the param's value.
+func isLt(fl FieldLevel) bool {
+
+ field := fl.Field()
+ param := fl.Param()
+
+ switch field.Kind() {
+
+ case reflect.String:
+ p := asInt(param)
+
+ return int64(utf8.RuneCountInString(field.String())) < p
+
+ case reflect.Slice, reflect.Map, reflect.Array:
+ p := asInt(param)
+
+ return int64(field.Len()) < p
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ p := asIntFromType(field.Type(), param)
+
+ return field.Int() < p
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ p := asUint(param)
+
+ return field.Uint() < p
+
+ case reflect.Float32, reflect.Float64:
+ p := asFloat(param)
+
+ return field.Float() < p
+
+ case reflect.Struct:
+
+ if field.Type() == timeType {
+
+ return field.Interface().(time.Time).Before(time.Now().UTC())
+ }
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// HasMaxOf is the validation function for validating if the current field's value is less than or equal to the param's value.
+func hasMaxOf(fl FieldLevel) bool {
+ return isLte(fl)
+}
+
+// IsTCP4AddrResolvable is the validation function for validating if the field's value is a resolvable tcp4 address.
+func isTCP4AddrResolvable(fl FieldLevel) bool {
+
+ if !isIP4Addr(fl) {
+ return false
+ }
+
+ _, err := net.ResolveTCPAddr("tcp4", fl.Field().String())
+ return err == nil
+}
+
+// IsTCP6AddrResolvable is the validation function for validating if the field's value is a resolvable tcp6 address.
+func isTCP6AddrResolvable(fl FieldLevel) bool {
+
+ if !isIP6Addr(fl) {
+ return false
+ }
+
+ _, err := net.ResolveTCPAddr("tcp6", fl.Field().String())
+
+ return err == nil
+}
+
+// IsTCPAddrResolvable is the validation function for validating if the field's value is a resolvable tcp address.
+func isTCPAddrResolvable(fl FieldLevel) bool {
+
+ if !isIP4Addr(fl) && !isIP6Addr(fl) {
+ return false
+ }
+
+ _, err := net.ResolveTCPAddr("tcp", fl.Field().String())
+
+ return err == nil
+}
+
+// IsUDP4AddrResolvable is the validation function for validating if the field's value is a resolvable udp4 address.
+func isUDP4AddrResolvable(fl FieldLevel) bool {
+
+ if !isIP4Addr(fl) {
+ return false
+ }
+
+ _, err := net.ResolveUDPAddr("udp4", fl.Field().String())
+
+ return err == nil
+}
+
+// IsUDP6AddrResolvable is the validation function for validating if the field's value is a resolvable udp6 address.
+func isUDP6AddrResolvable(fl FieldLevel) bool {
+
+ if !isIP6Addr(fl) {
+ return false
+ }
+
+ _, err := net.ResolveUDPAddr("udp6", fl.Field().String())
+
+ return err == nil
+}
+
+// IsUDPAddrResolvable is the validation function for validating if the field's value is a resolvable udp address.
+func isUDPAddrResolvable(fl FieldLevel) bool {
+
+ if !isIP4Addr(fl) && !isIP6Addr(fl) {
+ return false
+ }
+
+ _, err := net.ResolveUDPAddr("udp", fl.Field().String())
+
+ return err == nil
+}
+
+// IsIP4AddrResolvable is the validation function for validating if the field's value is a resolvable ip4 address.
+func isIP4AddrResolvable(fl FieldLevel) bool {
+
+ if !isIPv4(fl) {
+ return false
+ }
+
+ _, err := net.ResolveIPAddr("ip4", fl.Field().String())
+
+ return err == nil
+}
+
+// IsIP6AddrResolvable is the validation function for validating if the field's value is a resolvable ip6 address.
+func isIP6AddrResolvable(fl FieldLevel) bool {
+
+ if !isIPv6(fl) {
+ return false
+ }
+
+ _, err := net.ResolveIPAddr("ip6", fl.Field().String())
+
+ return err == nil
+}
+
+// IsIPAddrResolvable is the validation function for validating if the field's value is a resolvable ip address.
+func isIPAddrResolvable(fl FieldLevel) bool {
+
+ if !isIP(fl) {
+ return false
+ }
+
+ _, err := net.ResolveIPAddr("ip", fl.Field().String())
+
+ return err == nil
+}
+
+// IsUnixAddrResolvable is the validation function for validating if the field's value is a resolvable unix address.
+func isUnixAddrResolvable(fl FieldLevel) bool {
+
+ _, err := net.ResolveUnixAddr("unix", fl.Field().String())
+
+ return err == nil
+}
+
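+// isIP4Addr reports whether the field's value parses as an IPv4 address after
+// stripping any trailing ":port" suffix.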
+func isIP4Addr(fl FieldLevel) bool {
+
+ val := fl.Field().String()
+
+ if idx := strings.LastIndex(val, ":"); idx != -1 {
+ val = val[0:idx]
+ }
+
+ ip := net.ParseIP(val)
+
+ return ip != nil && ip.To4() != nil
+}
+
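+// isIP6Addr reports whether the field's value parses as an IPv6 address,
+// unwrapping a bracketed "[host]:port" form first.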
+func isIP6Addr(fl FieldLevel) bool {
+
+ val := fl.Field().String()
+
+ if idx := strings.LastIndex(val, ":"); idx != -1 {
+ if idx != 0 && val[idx-1:idx] == "]" {
+ val = val[1 : idx-1]
+ }
+ }
+
+ ip := net.ParseIP(val)
+
+ return ip != nil && ip.To4() == nil
+}
+
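+// isHostnameRFC952 reports whether the field's value matches the stricter
+// RFC 952 hostname grammar.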
+func isHostnameRFC952(fl FieldLevel) bool {
+ return hostnameRegexRFC952.MatchString(fl.Field().String())
+}
+
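+// isHostnameRFC1123 reports whether the field's value matches the RFC 1123
+// hostname grammar, which, unlike RFC 952, permits a leading digit.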
+func isHostnameRFC1123(fl FieldLevel) bool {
+ return hostnameRegexRFC1123.MatchString(fl.Field().String())
+}
+
+func isFQDN(fl FieldLevel) bool {
+ val := fl.Field().String()
+
+ if val == "" {
+ return false
+ }
+
+ return fqdnRegexRFC1123.MatchString(val)
+}
+
+// IsDir is the validation function for validating if the current field's value is a valid directory.
+func isDir(fl FieldLevel) bool {
+ field := fl.Field()
+
+ if field.Kind() == reflect.String {
+ fileInfo, err := os.Stat(field.String())
+ if err != nil {
+ return false
+ }
+
+ return fileInfo.IsDir()
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// isJSON is the validation function for validating if the current field's value is a valid JSON string.
+func isJSON(fl FieldLevel) bool {
+ field := fl.Field()
+
+ if field.Kind() == reflect.String {
+ val := field.String()
+ return json.Valid([]byte(val))
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// isHostnamePort validates a host:port combination for fields typically used for a socket address.
+func isHostnamePort(fl FieldLevel) bool {
+ val := fl.Field().String()
+ host, port, err := net.SplitHostPort(val)
+ if err != nil {
+ return false
+ }
+ // Port must be an integer in the range 1..65535.
+ if portNum, err := strconv.ParseInt(port, 10, 32); err != nil || portNum > 65535 || portNum < 1 {
+ return false
+ }
+
+ // If host is specified, it should match a DNS name
+ if host != "" {
+ return hostnameRegexRFC1123.MatchString(host)
+ }
+ return true
+}
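+
+// Illustrative examples: "example.com:8080", "localhost:443" and ":8080"
+// (empty host) pass, while "example.com" (no port) and "example.com:0"
+// (port out of range) fail.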
+
+// isLowercase is the validation function for validating if the current field's value is a lowercase string.
+func isLowercase(fl FieldLevel) bool {
+ field := fl.Field()
+
+ if field.Kind() == reflect.String {
+ if field.String() == "" {
+ return false
+ }
+ return field.String() == strings.ToLower(field.String())
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// isUppercase is the validation function for validating if the current field's value is an uppercase string.
+func isUppercase(fl FieldLevel) bool {
+ field := fl.Field()
+
+ if field.Kind() == reflect.String {
+ if field.String() == "" {
+ return false
+ }
+ return field.String() == strings.ToUpper(field.String())
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// isDatetime is the validation function for validating if the current field's value is a valid datetime string.
+func isDatetime(fl FieldLevel) bool {
+ field := fl.Field()
+ param := fl.Param()
+
+ if field.Kind() == reflect.String {
+ _, err := time.Parse(param, field.String())
+
+ return err == nil
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// isTimeZone is the validation function for validating if the current field's value is a valid time zone string.
+func isTimeZone(fl FieldLevel) bool {
+ field := fl.Field()
+
+ if field.Kind() == reflect.String {
+ // empty value is converted to UTC by time.LoadLocation but disallow it as it is not a valid time zone name
+ if field.String() == "" {
+ return false
+ }
+
+ // Local value is converted to the current system time zone by time.LoadLocation but disallow it as it is not a valid time zone name
+ if strings.ToLower(field.String()) == "local" {
+ return false
+ }
+
+ _, err := time.LoadLocation(field.String())
+ return err == nil
+ }
+
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+}
+
+// isIso3166Alpha2 is the validation function for validating if the current field's value is a valid iso3166-1 alpha-2 country code.
+func isIso3166Alpha2(fl FieldLevel) bool {
+ val := fl.Field().String()
+ return iso3166_1_alpha2[val]
+}
+
+// isIso3166Alpha3 is the validation function for validating if the current field's value is a valid iso3166-1 alpha-3 country code.
+func isIso3166Alpha3(fl FieldLevel) bool {
+ val := fl.Field().String()
+ return iso3166_1_alpha3[val]
+}
+
+// isIso3166AlphaNumeric is the validation function for validating if the current field's value is a valid iso3166-1 alpha-numeric country code.
+func isIso3166AlphaNumeric(fl FieldLevel) bool {
+ field := fl.Field()
+
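+	// numeric country codes are three digits; reduce modulo 1000 before the table lookup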
+ var code int
+ switch field.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ code = int(field.Int() % 1000)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ code = int(field.Uint() % 1000)
+ default:
+ panic(fmt.Sprintf("Bad field type %T", field.Interface()))
+ }
+ return iso3166_1_alpha_numeric[code]
+}
diff --git a/vendor/github.com/go-playground/validator/v10/cache.go b/vendor/github.com/go-playground/validator/v10/cache.go
new file mode 100644
index 0000000..0d18d6e
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/cache.go
@@ -0,0 +1,322 @@
+package validator
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+type tagType uint8
+
+const (
+ typeDefault tagType = iota
+ typeOmitEmpty
+ typeIsDefault
+ typeNoStructLevel
+ typeStructOnly
+ typeDive
+ typeOr
+ typeKeys
+ typeEndKeys
+)
+
+const (
+ invalidValidation = "Invalid validation tag on field '%s'"
+ undefinedValidation = "Undefined validation function '%s' on field '%s'"
+ keysTagNotDefined = "'" + endKeysTag + "' tag encountered without a corresponding '" + keysTag + "' tag"
+)
+
+type structCache struct {
+ lock sync.Mutex
+ m atomic.Value // map[reflect.Type]*cStruct
+}
+
+func (sc *structCache) Get(key reflect.Type) (c *cStruct, found bool) {
+ c, found = sc.m.Load().(map[reflect.Type]*cStruct)[key]
+ return
+}
+
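+// Set stores a parsed struct using copy-on-write: the caller must hold sc.lock,
+// while readers in Get load the map atomically without locking.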
+func (sc *structCache) Set(key reflect.Type, value *cStruct) {
+ m := sc.m.Load().(map[reflect.Type]*cStruct)
+ nm := make(map[reflect.Type]*cStruct, len(m)+1)
+ for k, v := range m {
+ nm[k] = v
+ }
+ nm[key] = value
+ sc.m.Store(nm)
+}
+
+type tagCache struct {
+ lock sync.Mutex
+ m atomic.Value // map[string]*cTag
+}
+
+func (tc *tagCache) Get(key string) (c *cTag, found bool) {
+ c, found = tc.m.Load().(map[string]*cTag)[key]
+ return
+}
+
+func (tc *tagCache) Set(key string, value *cTag) {
+ m := tc.m.Load().(map[string]*cTag)
+ nm := make(map[string]*cTag, len(m)+1)
+ for k, v := range m {
+ nm[k] = v
+ }
+ nm[key] = value
+ tc.m.Store(nm)
+}
+
+type cStruct struct {
+ name string
+ fields []*cField
+ fn StructLevelFuncCtx
+}
+
+type cField struct {
+ idx int
+ name string
+ altName string
+ namesEqual bool
+ cTags *cTag
+}
+
+type cTag struct {
+ tag string
+ aliasTag string
+ actualAliasTag string
+ param string
+ keys *cTag // only populated when using tag's 'keys' and 'endkeys' for map key validation
+ next *cTag
+ fn FuncCtx
+ typeof tagType
+ hasTag bool
+ hasAlias bool
+ hasParam bool // true if parameter used eg. eq= where the equal sign has been set
+ isBlockEnd bool // indicates the current tag represents the last validation in the block
+ runValidationWhenNil bool
+}
+
+func (v *Validate) extractStructCache(current reflect.Value, sName string) *cStruct {
+ v.structCache.lock.Lock()
+ defer v.structCache.lock.Unlock() // leave as defer! because if inner panics, it will never get unlocked otherwise!
+
+ typ := current.Type()
+
+ // could have been multiple trying to access, but once first is done this ensures struct
+ // isn't parsed again.
+ cs, ok := v.structCache.Get(typ)
+ if ok {
+ return cs
+ }
+
+ cs = &cStruct{name: sName, fields: make([]*cField, 0), fn: v.structLevelFuncs[typ]}
+
+ numFields := current.NumField()
+
+ var ctag *cTag
+ var fld reflect.StructField
+ var tag string
+ var customName string
+
+ for i := 0; i < numFields; i++ {
+
+ fld = typ.Field(i)
+
+ if !fld.Anonymous && len(fld.PkgPath) > 0 {
+ continue
+ }
+
+ tag = fld.Tag.Get(v.tagName)
+
+ if tag == skipValidationTag {
+ continue
+ }
+
+ customName = fld.Name
+
+ if v.hasTagNameFunc {
+ name := v.tagNameFunc(fld)
+ if len(name) > 0 {
+ customName = name
+ }
+ }
+
+ // NOTE: cannot use shared tag cache, because tags may be equal, but things like alias may be different
+ // and so only struct level caching can be used instead of combined with Field tag caching
+
+ if len(tag) > 0 {
+ ctag, _ = v.parseFieldTagsRecursive(tag, fld.Name, "", false)
+ } else {
+ // even if field doesn't have validations need cTag for traversing to potential inner/nested
+ // elements of the field.
+ ctag = new(cTag)
+ }
+
+ cs.fields = append(cs.fields, &cField{
+ idx: i,
+ name: fld.Name,
+ altName: customName,
+ cTags: ctag,
+ namesEqual: fld.Name == customName,
+ })
+ }
+ v.structCache.Set(typ, cs)
+ return cs
+}
+
+func (v *Validate) parseFieldTagsRecursive(tag string, fieldName string, alias string, hasAlias bool) (firstCtag *cTag, current *cTag) {
+ var t string
+ noAlias := len(alias) == 0
+ tags := strings.Split(tag, tagSeparator)
+
+ for i := 0; i < len(tags); i++ {
+ t = tags[i]
+ if noAlias {
+ alias = t
+ }
+
+ // check map for alias and process new tags, otherwise process as usual
+ if tagsVal, found := v.aliases[t]; found {
+ if i == 0 {
+ firstCtag, current = v.parseFieldTagsRecursive(tagsVal, fieldName, t, true)
+ } else {
+ next, curr := v.parseFieldTagsRecursive(tagsVal, fieldName, t, true)
+ current.next, current = next, curr
+
+ }
+ continue
+ }
+
+ var prevTag tagType
+
+ if i == 0 {
+ current = &cTag{aliasTag: alias, hasAlias: hasAlias, hasTag: true, typeof: typeDefault}
+ firstCtag = current
+ } else {
+ prevTag = current.typeof
+ current.next = &cTag{aliasTag: alias, hasAlias: hasAlias, hasTag: true}
+ current = current.next
+ }
+
+ switch t {
+ case diveTag:
+ current.typeof = typeDive
+ continue
+
+ case keysTag:
+ current.typeof = typeKeys
+
+ if i == 0 || prevTag != typeDive {
+ panic(fmt.Sprintf("'%s' tag must be immediately preceded by the '%s' tag", keysTag, diveTag))
+ }
+
+ // need to pass along only keys tag
+ // need to increment i to skip over the keys tags
+ b := make([]byte, 0, 64)
+
+ i++
+
+ for ; i < len(tags); i++ {
+
+ b = append(b, tags[i]...)
+ b = append(b, ',')
+
+ if tags[i] == endKeysTag {
+ break
+ }
+ }
+
+ current.keys, _ = v.parseFieldTagsRecursive(string(b[:len(b)-1]), fieldName, "", false)
+ continue
+
+ case endKeysTag:
+ current.typeof = typeEndKeys
+
+	// if this is not the last tag in the list, then no matching keysTag
+	// was defined and an error should be thrown
+ if i != len(tags)-1 {
+ panic(keysTagNotDefined)
+ }
+ return
+
+ case omitempty:
+ current.typeof = typeOmitEmpty
+ continue
+
+ case structOnlyTag:
+ current.typeof = typeStructOnly
+ continue
+
+ case noStructLevelTag:
+ current.typeof = typeNoStructLevel
+ continue
+
+ default:
+ if t == isdefault {
+ current.typeof = typeIsDefault
+ }
+ // if a pipe character is needed within the param you must use the utf8Pipe representation "0x7C"
+ orVals := strings.Split(t, orSeparator)
+
+ for j := 0; j < len(orVals); j++ {
+ vals := strings.SplitN(orVals[j], tagKeySeparator, 2)
+ if noAlias {
+ alias = vals[0]
+ current.aliasTag = alias
+ } else {
+ current.actualAliasTag = t
+ }
+
+ if j > 0 {
+ current.next = &cTag{aliasTag: alias, actualAliasTag: current.actualAliasTag, hasAlias: hasAlias, hasTag: true}
+ current = current.next
+ }
+ current.hasParam = len(vals) > 1
+
+ current.tag = vals[0]
+ if len(current.tag) == 0 {
+ panic(strings.TrimSpace(fmt.Sprintf(invalidValidation, fieldName)))
+ }
+
+ if wrapper, ok := v.validations[current.tag]; ok {
+ current.fn = wrapper.fn
+ current.runValidationWhenNil = wrapper.runValidatinOnNil
+ } else {
+ panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, current.tag, fieldName)))
+ }
+
+ if len(orVals) > 1 {
+ current.typeof = typeOr
+ }
+
+ if len(vals) > 1 {
+ current.param = strings.Replace(strings.Replace(vals[1], utf8HexComma, ",", -1), utf8Pipe, "|", -1)
+ }
+ }
+ current.isBlockEnd = true
+ }
+ }
+ return
+}
+
+func (v *Validate) fetchCacheTag(tag string) *cTag {
+ // find cached tag
+ ctag, found := v.tagCache.Get(tag)
+ if !found {
+ v.tagCache.lock.Lock()
+ defer v.tagCache.lock.Unlock()
+
+ // could have been multiple trying to access, but once first is done this ensures tag
+ // isn't parsed again.
+ ctag, found = v.tagCache.Get(tag)
+ if !found {
+ ctag, _ = v.parseFieldTagsRecursive(tag, "", "", false)
+ v.tagCache.Set(tag, ctag)
+ }
+ }
+ return ctag
+}
diff --git a/vendor/github.com/go-playground/validator/v10/country_codes.go b/vendor/github.com/go-playground/validator/v10/country_codes.go
new file mode 100644
index 0000000..ef81ead
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/country_codes.go
@@ -0,0 +1,162 @@
+package validator
+
+var iso3166_1_alpha2 = map[string]bool{
+ // see: https://www.iso.org/iso-3166-country-codes.html
+ "AF": true, "AX": true, "AL": true, "DZ": true, "AS": true,
+ "AD": true, "AO": true, "AI": true, "AQ": true, "AG": true,
+ "AR": true, "AM": true, "AW": true, "AU": true, "AT": true,
+ "AZ": true, "BS": true, "BH": true, "BD": true, "BB": true,
+ "BY": true, "BE": true, "BZ": true, "BJ": true, "BM": true,
+ "BT": true, "BO": true, "BQ": true, "BA": true, "BW": true,
+ "BV": true, "BR": true, "IO": true, "BN": true, "BG": true,
+ "BF": true, "BI": true, "KH": true, "CM": true, "CA": true,
+ "CV": true, "KY": true, "CF": true, "TD": true, "CL": true,
+ "CN": true, "CX": true, "CC": true, "CO": true, "KM": true,
+ "CG": true, "CD": true, "CK": true, "CR": true, "CI": true,
+ "HR": true, "CU": true, "CW": true, "CY": true, "CZ": true,
+ "DK": true, "DJ": true, "DM": true, "DO": true, "EC": true,
+ "EG": true, "SV": true, "GQ": true, "ER": true, "EE": true,
+ "ET": true, "FK": true, "FO": true, "FJ": true, "FI": true,
+ "FR": true, "GF": true, "PF": true, "TF": true, "GA": true,
+ "GM": true, "GE": true, "DE": true, "GH": true, "GI": true,
+ "GR": true, "GL": true, "GD": true, "GP": true, "GU": true,
+ "GT": true, "GG": true, "GN": true, "GW": true, "GY": true,
+ "HT": true, "HM": true, "VA": true, "HN": true, "HK": true,
+ "HU": true, "IS": true, "IN": true, "ID": true, "IR": true,
+ "IQ": true, "IE": true, "IM": true, "IL": true, "IT": true,
+ "JM": true, "JP": true, "JE": true, "JO": true, "KZ": true,
+ "KE": true, "KI": true, "KP": true, "KR": true, "KW": true,
+ "KG": true, "LA": true, "LV": true, "LB": true, "LS": true,
+ "LR": true, "LY": true, "LI": true, "LT": true, "LU": true,
+ "MO": true, "MK": true, "MG": true, "MW": true, "MY": true,
+ "MV": true, "ML": true, "MT": true, "MH": true, "MQ": true,
+ "MR": true, "MU": true, "YT": true, "MX": true, "FM": true,
+ "MD": true, "MC": true, "MN": true, "ME": true, "MS": true,
+ "MA": true, "MZ": true, "MM": true, "NA": true, "NR": true,
+ "NP": true, "NL": true, "NC": true, "NZ": true, "NI": true,
+ "NE": true, "NG": true, "NU": true, "NF": true, "MP": true,
+ "NO": true, "OM": true, "PK": true, "PW": true, "PS": true,
+ "PA": true, "PG": true, "PY": true, "PE": true, "PH": true,
+ "PN": true, "PL": true, "PT": true, "PR": true, "QA": true,
+ "RE": true, "RO": true, "RU": true, "RW": true, "BL": true,
+ "SH": true, "KN": true, "LC": true, "MF": true, "PM": true,
+ "VC": true, "WS": true, "SM": true, "ST": true, "SA": true,
+ "SN": true, "RS": true, "SC": true, "SL": true, "SG": true,
+ "SX": true, "SK": true, "SI": true, "SB": true, "SO": true,
+ "ZA": true, "GS": true, "SS": true, "ES": true, "LK": true,
+ "SD": true, "SR": true, "SJ": true, "SZ": true, "SE": true,
+ "CH": true, "SY": true, "TW": true, "TJ": true, "TZ": true,
+ "TH": true, "TL": true, "TG": true, "TK": true, "TO": true,
+ "TT": true, "TN": true, "TR": true, "TM": true, "TC": true,
+ "TV": true, "UG": true, "UA": true, "AE": true, "GB": true,
+ "US": true, "UM": true, "UY": true, "UZ": true, "VU": true,
+ "VE": true, "VN": true, "VG": true, "VI": true, "WF": true,
+ "EH": true, "YE": true, "ZM": true, "ZW": true,
+}
+
+var iso3166_1_alpha3 = map[string]bool{
+ // see: https://www.iso.org/iso-3166-country-codes.html
+ "AFG": true, "ALB": true, "DZA": true, "ASM": true, "AND": true,
+ "AGO": true, "AIA": true, "ATA": true, "ATG": true, "ARG": true,
+ "ARM": true, "ABW": true, "AUS": true, "AUT": true, "AZE": true,
+ "BHS": true, "BHR": true, "BGD": true, "BRB": true, "BLR": true,
+ "BEL": true, "BLZ": true, "BEN": true, "BMU": true, "BTN": true,
+ "BOL": true, "BES": true, "BIH": true, "BWA": true, "BVT": true,
+ "BRA": true, "IOT": true, "BRN": true, "BGR": true, "BFA": true,
+ "BDI": true, "CPV": true, "KHM": true, "CMR": true, "CAN": true,
+ "CYM": true, "CAF": true, "TCD": true, "CHL": true, "CHN": true,
+ "CXR": true, "CCK": true, "COL": true, "COM": true, "COD": true,
+ "COG": true, "COK": true, "CRI": true, "HRV": true, "CUB": true,
+ "CUW": true, "CYP": true, "CZE": true, "CIV": true, "DNK": true,
+ "DJI": true, "DMA": true, "DOM": true, "ECU": true, "EGY": true,
+ "SLV": true, "GNQ": true, "ERI": true, "EST": true, "SWZ": true,
+ "ETH": true, "FLK": true, "FRO": true, "FJI": true, "FIN": true,
+ "FRA": true, "GUF": true, "PYF": true, "ATF": true, "GAB": true,
+ "GMB": true, "GEO": true, "DEU": true, "GHA": true, "GIB": true,
+ "GRC": true, "GRL": true, "GRD": true, "GLP": true, "GUM": true,
+ "GTM": true, "GGY": true, "GIN": true, "GNB": true, "GUY": true,
+ "HTI": true, "HMD": true, "VAT": true, "HND": true, "HKG": true,
+ "HUN": true, "ISL": true, "IND": true, "IDN": true, "IRN": true,
+ "IRQ": true, "IRL": true, "IMN": true, "ISR": true, "ITA": true,
+ "JAM": true, "JPN": true, "JEY": true, "JOR": true, "KAZ": true,
+ "KEN": true, "KIR": true, "PRK": true, "KOR": true, "KWT": true,
+ "KGZ": true, "LAO": true, "LVA": true, "LBN": true, "LSO": true,
+ "LBR": true, "LBY": true, "LIE": true, "LTU": true, "LUX": true,
+ "MAC": true, "MDG": true, "MWI": true, "MYS": true, "MDV": true,
+ "MLI": true, "MLT": true, "MHL": true, "MTQ": true, "MRT": true,
+ "MUS": true, "MYT": true, "MEX": true, "FSM": true, "MDA": true,
+ "MCO": true, "MNG": true, "MNE": true, "MSR": true, "MAR": true,
+ "MOZ": true, "MMR": true, "NAM": true, "NRU": true, "NPL": true,
+ "NLD": true, "NCL": true, "NZL": true, "NIC": true, "NER": true,
+ "NGA": true, "NIU": true, "NFK": true, "MKD": true, "MNP": true,
+ "NOR": true, "OMN": true, "PAK": true, "PLW": true, "PSE": true,
+ "PAN": true, "PNG": true, "PRY": true, "PER": true, "PHL": true,
+ "PCN": true, "POL": true, "PRT": true, "PRI": true, "QAT": true,
+ "ROU": true, "RUS": true, "RWA": true, "REU": true, "BLM": true,
+ "SHN": true, "KNA": true, "LCA": true, "MAF": true, "SPM": true,
+ "VCT": true, "WSM": true, "SMR": true, "STP": true, "SAU": true,
+ "SEN": true, "SRB": true, "SYC": true, "SLE": true, "SGP": true,
+ "SXM": true, "SVK": true, "SVN": true, "SLB": true, "SOM": true,
+ "ZAF": true, "SGS": true, "SSD": true, "ESP": true, "LKA": true,
+ "SDN": true, "SUR": true, "SJM": true, "SWE": true, "CHE": true,
+ "SYR": true, "TWN": true, "TJK": true, "TZA": true, "THA": true,
+ "TLS": true, "TGO": true, "TKL": true, "TON": true, "TTO": true,
+ "TUN": true, "TUR": true, "TKM": true, "TCA": true, "TUV": true,
+ "UGA": true, "UKR": true, "ARE": true, "GBR": true, "UMI": true,
+ "USA": true, "URY": true, "UZB": true, "VUT": true, "VEN": true,
+ "VNM": true, "VGB": true, "VIR": true, "WLF": true, "ESH": true,
+ "YEM": true, "ZMB": true, "ZWE": true, "ALA": true,
+}
+var iso3166_1_alpha_numeric = map[int]bool{
+ // see: https://www.iso.org/iso-3166-country-codes.html
+ 4: true, 8: true, 12: true, 16: true, 20: true,
+ 24: true, 660: true, 10: true, 28: true, 32: true,
+ 51: true, 533: true, 36: true, 40: true, 31: true,
+ 44: true, 48: true, 50: true, 52: true, 112: true,
+ 56: true, 84: true, 204: true, 60: true, 64: true,
+ 68: true, 535: true, 70: true, 72: true, 74: true,
+ 76: true, 86: true, 96: true, 100: true, 854: true,
+ 108: true, 132: true, 116: true, 120: true, 124: true,
+ 136: true, 140: true, 148: true, 152: true, 156: true,
+ 162: true, 166: true, 170: true, 174: true, 180: true,
+ 178: true, 184: true, 188: true, 191: true, 192: true,
+ 531: true, 196: true, 203: true, 384: true, 208: true,
+ 262: true, 212: true, 214: true, 218: true, 818: true,
+ 222: true, 226: true, 232: true, 233: true, 748: true,
+ 231: true, 238: true, 234: true, 242: true, 246: true,
+ 250: true, 254: true, 258: true, 260: true, 266: true,
+ 270: true, 268: true, 276: true, 288: true, 292: true,
+ 300: true, 304: true, 308: true, 312: true, 316: true,
+ 320: true, 831: true, 324: true, 624: true, 328: true,
+ 332: true, 334: true, 336: true, 340: true, 344: true,
+ 348: true, 352: true, 356: true, 360: true, 364: true,
+ 368: true, 372: true, 833: true, 376: true, 380: true,
+ 388: true, 392: true, 832: true, 400: true, 398: true,
+ 404: true, 296: true, 408: true, 410: true, 414: true,
+ 417: true, 418: true, 428: true, 422: true, 426: true,
+ 430: true, 434: true, 438: true, 440: true, 442: true,
+ 446: true, 450: true, 454: true, 458: true, 462: true,
+ 466: true, 470: true, 584: true, 474: true, 478: true,
+ 480: true, 175: true, 484: true, 583: true, 498: true,
+ 492: true, 496: true, 499: true, 500: true, 504: true,
+ 508: true, 104: true, 516: true, 520: true, 524: true,
+ 528: true, 540: true, 554: true, 558: true, 562: true,
+ 566: true, 570: true, 574: true, 807: true, 580: true,
+ 578: true, 512: true, 586: true, 585: true, 275: true,
+ 591: true, 598: true, 600: true, 604: true, 608: true,
+ 612: true, 616: true, 620: true, 630: true, 634: true,
+ 642: true, 643: true, 646: true, 638: true, 652: true,
+ 654: true, 659: true, 662: true, 663: true, 666: true,
+ 670: true, 882: true, 674: true, 678: true, 682: true,
+ 686: true, 688: true, 690: true, 694: true, 702: true,
+ 534: true, 703: true, 705: true, 90: true, 706: true,
+ 710: true, 239: true, 728: true, 724: true, 144: true,
+ 729: true, 740: true, 744: true, 752: true, 756: true,
+ 760: true, 158: true, 762: true, 834: true, 764: true,
+ 626: true, 768: true, 772: true, 776: true, 780: true,
+ 788: true, 792: true, 795: true, 796: true, 798: true,
+ 800: true, 804: true, 784: true, 826: true, 581: true,
+ 840: true, 858: true, 860: true, 548: true, 862: true,
+ 704: true, 92: true, 850: true, 876: true, 732: true,
+ 887: true, 894: true, 716: true, 248: true,
+}
diff --git a/vendor/github.com/go-playground/validator/v10/doc.go b/vendor/github.com/go-playground/validator/v10/doc.go
new file mode 100644
index 0000000..a816c20
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/doc.go
@@ -0,0 +1,1308 @@
+/*
+Package validator implements value validations for structs and individual fields
+based on tags.
+
+It can also handle Cross-Field and Cross-Struct validation for nested structs
+and has the ability to dive into arrays and maps of any type.
+
+see more examples https://github.com/go-playground/validator/tree/master/_examples
+
+Validation Functions Return Type error
+
+Doing things this way is actually the way the standard library does it as
+well; see the os.Open function here:
+
+ https://golang.org/pkg/os/#Open.
+
+The authors return type "error" to avoid the issue discussed in the following,
+where err is always != nil:
+
+ http://stackoverflow.com/a/29138676/3158232
+ https://github.com/go-playground/validator/issues/134
+
+Validator returns only InvalidValidationError for bad validation input, nil or
+ValidationErrors as type error; so, in your code all you need to do is check
+if the error returned is not nil, and if it's not, check if the error is an
+InvalidValidationError (if necessary, most of the time it isn't), then type cast
+it to ValidationErrors like so: err.(validator.ValidationErrors).
+
+Custom Validation Functions
+
+Custom Validation functions can be added. Example:
+
+ // Structure
+ func customFunc(fl validator.FieldLevel) bool {
+
+ if fl.Field().String() == "invalid" {
+ return false
+ }
+
+ return true
+ }
+
+ validate.RegisterValidation("custom tag name", customFunc)
+ // NOTES: using the same tag name as an existing function
+ // will overwrite the existing one
+
+Cross-Field Validation
+
+Cross-Field Validation can be done via the following tags:
+ - eqfield
+ - nefield
+ - gtfield
+ - gtefield
+ - ltfield
+ - ltefield
+ - eqcsfield
+ - necsfield
+ - gtcsfield
+ - gtecsfield
+ - ltcsfield
+ - ltecsfield
+
+If, however, some custom cross-field validation is required, it can be done
+using a custom validation.
+
+Why not just have cross-fields validation tags (i.e. only eqcsfield and not
+eqfield)?
+
+The reason is efficiency. If you want to check a field within the same struct,
+"eqfield" only has to find the field on the same struct (1 level). But if we
+used "eqcsfield" it could be multiple levels down. Example:
+
+ type Inner struct {
+ StartDate time.Time
+ }
+
+ type Outer struct {
+ InnerStructField *Inner
+ CreatedAt time.Time `validate:"ltecsfield=InnerStructField.StartDate"`
+ }
+
+ now := time.Now()
+
+ inner := &Inner{
+ StartDate: now,
+ }
+
+ outer := &Outer{
+ InnerStructField: inner,
+ CreatedAt: now,
+ }
+
+ errs := validate.Struct(outer)
+
+ // NOTE: when calling validate.Struct(val) topStruct will be the top level struct passed
+ // into the function
+ // when calling validate.VarWithValue(val, field, tag) val will be
+ // whatever you pass, struct, field...
+ // when calling validate.Field(field, tag) val will be nil
+
+Multiple Validators
+
+Multiple validators on a field will process in the order defined. Example:
+
+ type Test struct {
+ Field `validate:"max=10,min=1"`
+ }
+
+ // max will be checked then min
+
+Bad Validator definitions are not handled by the library. Example:
+
+ type Test struct {
+ Field `validate:"min=10,max=0"`
+ }
+
+ // this definition of min max will never succeed
+
+Using Validator Tags
+
+Baked In Cross-Field validation only compares fields on the same struct.
+If Cross-Field + Cross-Struct validation is needed you should implement your
+own custom validator.
+
+Comma (",") is the default separator of validation tags. If you wish to
+have a comma included within the parameter (i.e. excludesall=,) you will need to
+use the UTF-8 hex representation 0x2C, which is replaced in the code as a comma,
+so the above will become excludesall=0x2C.
+
+ type Test struct {
+ Field `validate:"excludesall=,"` // BAD! Do not include a comma.
+ Field `validate:"excludesall=0x2C"` // GOOD! Use the UTF-8 hex representation.
+ }
+
+Pipe ("|") is the 'or' validation tags deparator. If you wish to
+have a pipe included within the parameter i.e. excludesall=| you will need to
+use the UTF-8 hex representation 0x7C, which is replaced in the code as a pipe,
+so the above will become excludesall=0x7C
+
+ type Test struct {
+ Field `validate:"excludesall=|"` // BAD! Do not include a a pipe!
+ Field `validate:"excludesall=0x7C"` // GOOD! Use the UTF-8 hex representation.
+ }
+
+
+Baked In Validators and Tags
+
+Here is a list of the current built in validators:
+
+
+Skip Field
+
+Tells the validation to skip this struct field; this is particularly
+handy for excluding embedded structs from validation. (Usage: -)
+ Usage: -
+
+
+Or Operator
+
+This is the 'or' operator allowing multiple validators to be used and
+accepted. (Usage: rgb|rgba) <-- this would allow either rgb or rgba
+colors to be accepted. This can also be combined with 'and' for example
+(Usage: omitempty,rgb|rgba)
+
+ Usage: |
+
+StructOnly
+
+When a field that is a nested struct is encountered, and contains this flag
+any validation on the nested struct will be run, but none of the nested
+struct fields will be validated. This is useful if inside of your program
+you know the struct will be valid, but need to verify it has been assigned.
+NOTE: only "required" and "omitempty" can be used on a struct itself.
+
+ Usage: structonly
+
+NoStructLevel
+
+Same as structonly tag except that any struct level validations will not run.
+
+ Usage: nostructlevel
+
+Omit Empty
+
+Allows conditional validation, for example if a field is not set with
+a value (Determined by the "required" validator) then other validation
+such as min or max won't run, but if a value is set validation will run.
+
+ Usage: omitempty
+
+Dive
+
+This tells the validator to dive into a slice, array or map and validate that
+level of the slice, array or map with the validation tags that follow.
+Multidimensional nesting is also supported; each level you wish to dive into
+requires another dive tag. dive has some sub-tags, 'keys' & 'endkeys'; please see
+the Keys & EndKeys section just below.
+
+ Usage: dive
+
+Example #1
+
+ [][]string with validation tag "gt=0,dive,len=1,dive,required"
+ // gt=0 will be applied to []
+ // len=1 will be applied to []string
+ // required will be applied to string
+
+Example #2
+
+ [][]string with validation tag "gt=0,dive,dive,required"
+ // gt=0 will be applied to []
+ // []string will be spared validation
+ // required will be applied to string
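+
+A minimal sketch of Example #1 in struct form (the struct is illustrative):
+
+	type Post struct {
+		Tags [][]string `validate:"gt=0,dive,len=1,dive,required"`
+	}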
+
+Keys & EndKeys
+
+These are to be used together directly after the dive tag and tell the validator
+that anything between 'keys' and 'endkeys' applies to the keys of a map and not the
+values; think of it like the 'dive' tag, but for map keys instead of values.
+Multidimensional nesting is also supported; each level you wish to validate will
+require another 'keys' and 'endkeys' tag. These tags are only valid for maps.
+
+ Usage: dive,keys,othertagvalidation(s),endkeys,valuevalidationtags
+
+Example #1
+
+ map[string]string with validation tag "gt=0,dive,keys,eq=1|eq=2,endkeys,required"
+ // gt=0 will be applied to the map itself
+ // eq=1|eq=2 will be applied to the map keys
+ // required will be applied to map values
+
+Example #2
+
+ map[[2]string]string with validation tag "gt=0,dive,keys,dive,eq=1|eq=2,endkeys,required"
+ // gt=0 will be applied to the map itself
+ // eq=1|eq=2 will be applied to each array element in the map keys
+ // required will be applied to map values
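+
+A minimal sketch of Example #1 in struct form (the struct is illustrative):
+
+	type Config struct {
+		Settings map[string]string `validate:"gt=0,dive,keys,eq=1|eq=2,endkeys,required"`
+	}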
+
+Required
+
+This validates that the value is not the data type's default zero value.
+For numbers ensures value is not zero. For strings ensures value is
+not "". For slices, maps, pointers, interfaces, channels and functions
+ensures the value is not nil.
+
+ Usage: required
+
+Required If
+
+The field under validation must be present and not empty only if all
+the other specified fields are equal to the value following the specified
+field. For strings ensures value is not "". For slices, maps, pointers,
+interfaces, channels and functions ensures the value is not nil.
+
+ Usage: required_if
+
+Examples:
+
+ // require the field if the Field1 is equal to the parameter given:
+ Usage: required_if=Field1 foobar
+
+ // require the field if Field1 and Field2 are equal to the values given respectively:
+ Usage: required_if=Field1 foo Field2 bar
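+
+As a minimal struct-level sketch (the struct and field names are illustrative):
+
+	type Order struct {
+		Field1      string
+		GiftMessage string `validate:"required_if=Field1 foobar"`
+	}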
+
+Required Unless
+
+The field under validation must be present and not empty unless all
+the other specified fields are equal to the value following the specified
+field. For strings ensures value is not "". For slices, maps, pointers,
+interfaces, channels and functions ensures the value is not nil.
+
+ Usage: required_unless
+
+Examples:
+
+ // require the field unless the Field1 is equal to the parameter given:
+ Usage: required_unless=Field1 foobar
+
+ // require the field unless Field1 and Field2 are equal to the values given respectively:
+ Usage: required_unless=Field1 foo Field2 bar
+
+Required With
+
+The field under validation must be present and not empty only if any
+of the other specified fields are present. For strings ensures value is
+not "". For slices, maps, pointers, interfaces, channels and functions
+ensures the value is not nil.
+
+ Usage: required_with
+
+Examples:
+
+ // require the field if the Field1 is present:
+ Usage: required_with=Field1
+
+ // require the field if the Field1 or Field2 is present:
+ Usage: required_with=Field1 Field2
+
+Required With All
+
+The field under validation must be present and not empty only if all
+of the other specified fields are present. For strings ensures value is
+not "". For slices, maps, pointers, interfaces, channels and functions
+ensures the value is not nil.
+
+ Usage: required_with_all
+
+Example:
+
+ // require the field if Field1 and Field2 are present:
+ Usage: required_with_all=Field1 Field2
+
+Required Without
+
+The field under validation must be present and not empty only when any
+of the other specified fields are not present. For strings ensures value is
+not "". For slices, maps, pointers, interfaces, channels and functions
+ensures the value is not nil.
+
+ Usage: required_without
+
+Examples:
+
+ // require the field if the Field1 is not present:
+ Usage: required_without=Field1
+
+ // require the field if the Field1 or Field2 is not present:
+ Usage: required_without=Field1 Field2
+
+Required Without All
+
+The field under validation must be present and not empty only when all
+of the other specified fields are not present. For strings ensures value is
+not "". For slices, maps, pointers, interfaces, channels and functions
+ensures the value is not nil.
+
+ Usage: required_without_all
+
+Example:
+
+ // require the field if Field1 and Field2 are not present:
+ Usage: required_without_all=Field1 Field2
+
+Is Default
+
+This validates that the value is the default value and is almost the
+opposite of required.
+
+ Usage: isdefault
+
+Length
+
+For numbers, length will ensure that the value is
+equal to the parameter given. For strings, it checks that
+the string length is exactly that number of characters. For slices,
+arrays, and maps, validates the number of items.
+
+Example #1
+
+ Usage: len=10
+
+Example #2 (time.Duration)
+
+For time.Duration, len will ensure that the value is equal to the duration given
+in the parameter.
+
+ Usage: len=1h30m
+
+Maximum
+
+For numbers, max will ensure that the value is
+less than or equal to the parameter given. For strings, it checks
+that the string length is at most that number of characters. For
+slices, arrays, and maps, validates the number of items.
+
+Example #1
+
+ Usage: max=10
+
+Example #2 (time.Duration)
+
+For time.Duration, max will ensure that the value is less than or equal to the
+duration given in the parameter.
+
+ Usage: max=1h30m
+
+Minimum
+
+For numbers, min will ensure that the value is
+greater or equal to the parameter given. For strings, it checks that
+the string length is at least that number of characters. For slices,
+arrays, and maps, validates the number of items.
+
+Example #1
+
+ Usage: min=10
+
+Example #2 (time.Duration)
+
+For time.Duration, min will ensure that the value is greater than or equal to
+the duration given in the parameter.
+
+ Usage: min=1h30m
+
+Equals
+
+For strings & numbers, eq will ensure that the value is
+equal to the parameter given. For slices, arrays, and maps,
+validates the number of items.
+
+Example #1
+
+ Usage: eq=10
+
+Example #2 (time.Duration)
+
+For time.Duration, eq will ensure that the value is equal to the duration given
+in the parameter.
+
+ Usage: eq=1h30m
+
+Not Equal
+
+For strings & numbers, ne will ensure that the value is not
+equal to the parameter given. For slices, arrays, and maps,
+validates the number of items.
+
+Example #1
+
+ Usage: ne=10
+
+Example #2 (time.Duration)
+
+For time.Duration, ne will ensure that the value is not equal to the duration
+given in the parameter.
+
+ Usage: ne=1h30m
+
+One Of
+
+For strings, ints, and uints, oneof will ensure that the value
+is one of the values in the parameter. The parameter should be
+a list of values separated by whitespace. Values may be
+strings or numbers. To match strings with spaces in them, include
+the target string between single quotes.
+
+ Usage: oneof=red green
+ oneof='red green' 'blue yellow'
+ oneof=5 7 9
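+
+For example, a minimal sketch (the struct is illustrative):
+
+	type Shirt struct {
+		Color string `validate:"oneof=red green"`
+		Size  uint   `validate:"oneof=5 7 9"`
+	}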
+
+Greater Than
+
+For numbers, this will ensure that the value is greater than the
+parameter given. For strings, it checks that the string length
+is greater than that number of characters. For slices, arrays
+and maps it validates the number of items.
+
+Example #1
+
+ Usage: gt=10
+
+Example #2 (time.Time)
+
+For time.Time ensures the time value is greater than time.Now.UTC().
+
+ Usage: gt
+
+Example #3 (time.Duration)
+
+For time.Duration, gt will ensure that the value is greater than the duration
+given in the parameter.
+
+ Usage: gt=1h30m
+
+Greater Than or Equal
+
+Same as 'min' above. Kept both to make terminology with 'len' easier.
+
+Example #1
+
+ Usage: gte=10
+
+Example #2 (time.Time)
+
+For time.Time ensures the time value is greater than or equal to time.Now.UTC().
+
+ Usage: gte
+
+Example #3 (time.Duration)
+
+For time.Duration, gte will ensure that the value is greater than or equal to
+the duration given in the parameter.
+
+ Usage: gte=1h30m
+
+Less Than
+
+For numbers, this will ensure that the value is less than the parameter given.
+For strings, it checks that the string length is less than that number of
+characters. For slices, arrays, and maps it validates the number of items.
+
+Example #1
+
+ Usage: lt=10
+
+Example #2 (time.Time)
+
+For time.Time ensures the time value is less than time.Now.UTC().
+
+ Usage: lt
+
+Example #3 (time.Duration)
+
+For time.Duration, lt will ensure that the value is less than the duration given
+in the parameter.
+
+ Usage: lt=1h30m
+
+Less Than or Equal
+
+Same as 'max' above. Kept both to make terminology with 'len' easier.
+
+Example #1
+
+ Usage: lte=10
+
+Example #2 (time.Time)
+
+For time.Time ensures the time value is less than or equal to time.Now.UTC().
+
+ Usage: lte
+
+Example #3 (time.Duration)
+
+For time.Duration, lte will ensure that the value is less than or equal to the
+duration given in the parameter.
+
+ Usage: lte=1h30m
+
+Field Equals Another Field
+
+This will validate the field value against another field's value either within
+a struct or passed in field.
+
+Example #1:
+
+ // Validation on Password field using:
+ Usage: eqfield=ConfirmPassword
+
+Example #2:
+
+ // Validating by field:
+ validate.VarWithValue(password, confirmpassword, "eqfield")
+
+Field Equals Another Field (relative)
+
+This does the same as eqfield except that it validates the field provided relative
+to the top level struct.
+
+ Usage: eqcsfield=InnerStructField.Field
+
+Field Does Not Equal Another Field
+
+This will validate the field value against another field's value either within
+a struct or passed in field.
+
+Examples:
+
+ // Confirm two colors are not the same:
+ //
+ // Validation on Color field:
+ Usage: nefield=Color2
+
+ // Validating by field:
+ validate.VarWithValue(color1, color2, "nefield")
+
+Field Does Not Equal Another Field (relative)
+
+This does the same as nefield except that it validates the field provided
+relative to the top level struct.
+
+ Usage: necsfield=InnerStructField.Field
+
+Field Greater Than Another Field
+
+Only valid for Numbers, time.Duration and time.Time types, this will validate
+the field value against another field's value either within a struct or passed in
+field. Usage examples are for validation of a Start and End date:
+
+Example #1:
+
+ // Validation on End field using:
+ Usage: gtfield=Start
+
+Example #2:
+
+ // Validating by field:
+ validate.VarWithValue(start, end, "gtfield")
+
+Field Greater Than Another Relative Field
+
+This does the same as gtfield except that it validates the field provided
+relative to the top level struct.
+
+ Usage: gtcsfield=InnerStructField.Field
+
+Field Greater Than or Equal To Another Field
+
+Only valid for Numbers, time.Duration and time.Time types, this will validate
+the field value against another field's value either within a struct or passed in
+field. Usage examples are for validation of a Start and End date:
+
+Example #1:
+
+ // Validation on End field using:
+ Usage: gtefield=Start
+
+Example #2:
+
+ // Validating by field:
+ validate.VarWithValue(start, end, "gtefield")
+
+Field Greater Than or Equal To Another Relative Field
+
+This does the same as gtefield except that it validates the field provided relative
+to the top level struct.
+
+ Usage: gtecsfield=InnerStructField.Field
+
+Less Than Another Field
+
+Only valid for Numbers, time.Duration and time.Time types, this will validate
+the field value against another field's value either within a struct or passed in
+field. Usage examples are for validation of a Start and End date:
+
+Example #1:
+
+ // Validation on End field using:
+ Usage: ltfield=Start
+
+Example #2:
+
+ // Validating by field:
+ validate.VarWithValue(start, end, "ltfield")
+
+Less Than Another Relative Field
+
+This does the same as ltfield except that it validates the field provided relative
+to the top level struct.
+
+ Usage: ltcsfield=InnerStructField.Field
+
+Less Than or Equal To Another Field
+
+Only valid for Numbers, time.Duration and time.Time types, this will validate
+the field value against another field's value either within a struct or passed in
+field. Usage examples are for validation of a Start and End date:
+
+Example #1:
+
+ // Validation on End field using:
+ Usage: ltefield=Start
+
+Example #2:
+
+ // Validating by field:
+ validate.VarWithValue(start, end, "ltefield")
+
+Less Than or Equal To Another Relative Field
+
+This does the same as ltefield except that it validates the field provided relative
+to the top level struct.
+
+ Usage: ltecsfield=InnerStructField.Field
+
+Field Contains Another Field
+
+This does the same as contains except for struct fields. It should only be used
+with string types. See the behavior of reflect.Value.String() for behavior on
+other types.
+
+ Usage: containsfield=InnerStructField.Field
+
+Field Excludes Another Field
+
+This does the same as excludes except for struct fields. It should only be used
+with string types. See the behavior of reflect.Value.String() for behavior on
+other types.
+
+ Usage: excludesfield=InnerStructField.Field
+
+Unique
+
+For arrays & slices, unique will ensure that there are no duplicates.
+For maps, unique will ensure that there are no duplicate values.
+For slices of struct, unique will ensure that there are no duplicate values
+in a field of the struct specified via a parameter.
+
+ // For arrays, slices, and maps:
+ Usage: unique
+
+ // For slices of struct:
+ Usage: unique=field
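+
+A minimal sketch of unique on a slice of structs (the structs are illustrative):
+
+	type User struct {
+		Email string
+	}
+
+	type Group struct {
+		Users []User `validate:"unique=Email"`
+	}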
+
+Alpha Only
+
+This validates that a string value contains ASCII alpha characters only
+
+ Usage: alpha
+
+Alphanumeric
+
+This validates that a string value contains ASCII alphanumeric characters only
+
+ Usage: alphanum
+
+Alpha Unicode
+
+This validates that a string value contains unicode alpha characters only
+
+ Usage: alphaunicode
+
+Alphanumeric Unicode
+
+This validates that a string value contains unicode alphanumeric characters only
+
+ Usage: alphanumunicode
+
+Number
+
+This validates that a string value contains number values only.
+For integers or floats it returns true.
+
+ Usage: number
+
+Numeric
+
+This validates that a string value contains a basic numeric value.
+Basic excludes exponents etc...
+For integers or floats it returns true.
+
+ Usage: numeric
+
+Hexadecimal String
+
+This validates that a string value contains a valid hexadecimal.
+
+ Usage: hexadecimal
+
+Hexcolor String
+
+This validates that a string value contains a valid hex color including
+hashtag (#)
+
+ Usage: hexcolor
+
+Lowercase String
+
+This validates that a string value contains only lowercase characters. An empty string is not a valid lowercase string.
+
+ Usage: lowercase
+
+Uppercase String
+
+This validates that a string value contains only uppercase characters. An empty string is not a valid uppercase string.
+
+ Usage: uppercase
+
+RGB String
+
+This validates that a string value contains a valid rgb color
+
+ Usage: rgb
+
+RGBA String
+
+This validates that a string value contains a valid rgba color
+
+ Usage: rgba
+
+HSL String
+
+This validates that a string value contains a valid hsl color
+
+ Usage: hsl
+
+HSLA String
+
+This validates that a string value contains a valid hsla color
+
+ Usage: hsla
+
+E.164 Phone Number String
+
+This validates that a string value contains a valid E.164 Phone number
+https://en.wikipedia.org/wiki/E.164 (ex. +1123456789)
+
+ Usage: e164
+
+E-mail String
+
+This validates that a string value contains a valid email
+This may not conform to all possibilities of any rfc standard, but neither
+does any email provider accept all possibilities.
+
+ Usage: email
+
+JSON String
+
+This validates that a string value is valid JSON
+
+ Usage: json
+
+File path
+
+This validates that a string value contains a valid file path and that
+the file exists on the machine.
+This is done using os.Stat, which is a platform independent function.
+
+ Usage: file
+
+URL String
+
+This validates that a string value contains a valid url
+This will accept any url the golang request uri accepts but must contain
+a scheme, for example http:// or rtmp://
+
+ Usage: url
+
+URI String
+
+This validates that a string value contains a valid uri
+This will accept any uri the golang request uri accepts
+
+ Usage: uri
+
+Urn RFC 2141 String
+
+This validates that a string value contains a valid URN
+according to the RFC 2141 spec.
+
+ Usage: urn_rfc2141
+
+Base64 String
+
+This validates that a string value contains a valid base64 value.
+Although an empty string is valid base64, this will report an empty string
+as an error; if you wish to accept an empty string as valid you can use
+this with the omitempty tag.
+
+ Usage: base64
+
+Base64URL String
+
+This validates that a string value contains a valid base64 URL safe value
+according to the RFC 4648 spec.
+Although an empty string is a valid base64 URL safe value, this will report
+an empty string as an error; if you wish to accept an empty string as valid
+you can use this with the omitempty tag.
+
+ Usage: base64url
+
+Bitcoin Address
+
+This validates that a string value contains a valid bitcoin address.
+The format of the string is checked to ensure it matches one of the formats
+P2PKH or P2SH, and checksum validation is performed.
+
+ Usage: btc_addr
+
+Bitcoin Bech32 Address (segwit)
+
+This validates that a string value contains a valid bitcoin Bech32 address as defined
+by bip-0173 (https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki)
+Special thanks to Pieter Wuille for providing reference implementations.
+
+ Usage: btc_addr_bech32
+
+Ethereum Address
+
+This validates that a string value contains a valid ethereum address.
+The format of the string is checked to ensure it matches the standard Ethereum address format.
+
+ Usage: eth_addr
+
+Contains
+
+This validates that a string value contains the substring value.
+
+ Usage: contains=@
+
+Contains Any
+
+This validates that a string value contains any Unicode code points
+in the substring value.
+
+ Usage: containsany=!@#?
+
+Contains Rune
+
+This validates that a string value contains the supplied rune value.
+
+ Usage: containsrune=@
+
+Excludes
+
+This validates that a string value does not contain the substring value.
+
+ Usage: excludes=@
+
+Excludes All
+
+This validates that a string value does not contain any Unicode code
+points in the substring value.
+
+ Usage: excludesall=!@#?
+
+Excludes Rune
+
+This validates that a string value does not contain the supplied rune value.
+
+ Usage: excludesrune=@
+
+Starts With
+
+This validates that a string value starts with the supplied string value
+
+ Usage: startswith=hello
+
+Ends With
+
+This validates that a string value ends with the supplied string value
+
+ Usage: endswith=goodbye
+
+Does Not Start With
+
+This validates that a string value does not start with the supplied string value
+
+ Usage: startsnotwith=hello
+
+Does Not End With
+
+This validates that a string value does not end with the supplied string value
+
+ Usage: endsnotwith=goodbye
+
+International Standard Book Number
+
+This validates that a string value contains a valid isbn10 or isbn13 value.
+
+ Usage: isbn
+
+International Standard Book Number 10
+
+This validates that a string value contains a valid isbn10 value.
+
+ Usage: isbn10
+
+International Standard Book Number 13
+
+This validates that a string value contains a valid isbn13 value.
+
+ Usage: isbn13
+
+Universally Unique Identifier UUID
+
+This validates that a string value contains a valid UUID. Uppercase UUID values will not pass - use `uuid_rfc4122` instead.
+
+ Usage: uuid
+
+Universally Unique Identifier UUID v3
+
+This validates that a string value contains a valid version 3 UUID. Uppercase UUID values will not pass - use `uuid3_rfc4122` instead.
+
+ Usage: uuid3
+
+Universally Unique Identifier UUID v4
+
+This validates that a string value contains a valid version 4 UUID. Uppercase UUID values will not pass - use `uuid4_rfc4122` instead.
+
+ Usage: uuid4
+
+Universally Unique Identifier UUID v5
+
+This validates that a string value contains a valid version 5 UUID. Uppercase UUID values will not pass - use `uuid5_rfc4122` instead.
+
+ Usage: uuid5
+
+ASCII
+
+This validates that a string value contains only ASCII characters.
+NOTE: if the string is blank, this validates as true.
+
+ Usage: ascii
+
+Printable ASCII
+
+This validates that a string value contains only printable ASCII characters.
+NOTE: if the string is blank, this validates as true.
+
+ Usage: printascii
+
+Multi-Byte Characters
+
+This validates that a string value contains one or more multibyte characters.
+NOTE: if the string is blank, this validates as true.
+
+ Usage: multibyte
+
+Data URL
+
+This validates that a string value contains a valid DataURI.
+NOTE: this will also validate that the data portion is valid base64
+
+ Usage: datauri
+
+Latitude
+
+This validates that a string value contains a valid latitude.
+
+ Usage: latitude
+
+Longitude
+
+This validates that a string value contains a valid longitude.
+
+ Usage: longitude
+
+Social Security Number SSN
+
+This validates that a string value contains a valid U.S. Social Security Number.
+
+ Usage: ssn
+
+Internet Protocol Address IP
+
+This validates that a string value contains a valid IP Address.
+
+ Usage: ip
+
+Internet Protocol Address IPv4
+
+This validates that a string value contains a valid v4 IP Address.
+
+ Usage: ipv4
+
+Internet Protocol Address IPv6
+
+This validates that a string value contains a valid v6 IP Address.
+
+ Usage: ipv6
+
+Classless Inter-Domain Routing CIDR
+
+This validates that a string value contains a valid CIDR Address.
+
+ Usage: cidr
+
+Classless Inter-Domain Routing CIDRv4
+
+This validates that a string value contains a valid v4 CIDR Address.
+
+ Usage: cidrv4
+
+Classless Inter-Domain Routing CIDRv6
+
+This validates that a string value contains a valid v6 CIDR Address.
+
+ Usage: cidrv6
+
+Transmission Control Protocol Address TCP
+
+This validates that a string value contains a valid resolvable TCP Address.
+
+ Usage: tcp_addr
+
+Transmission Control Protocol Address TCPv4
+
+This validates that a string value contains a valid resolvable v4 TCP Address.
+
+ Usage: tcp4_addr
+
+Transmission Control Protocol Address TCPv6
+
+This validates that a string value contains a valid resolvable v6 TCP Address.
+
+ Usage: tcp6_addr
+
+User Datagram Protocol Address UDP
+
+This validates that a string value contains a valid resolvable UDP Address.
+
+ Usage: udp_addr
+
+User Datagram Protocol Address UDPv4
+
+This validates that a string value contains a valid resolvable v4 UDP Address.
+
+ Usage: udp4_addr
+
+User Datagram Protocol Address UDPv6
+
+This validates that a string value contains a valid resolvable v6 UDP Address.
+
+ Usage: udp6_addr
+
+Internet Protocol Address IP
+
+This validates that a string value contains a valid resolvable IP Address.
+
+ Usage: ip_addr
+
+Internet Protocol Address IPv4
+
+This validates that a string value contains a valid resolvable v4 IP Address.
+
+ Usage: ip4_addr
+
+Internet Protocol Address IPv6
+
+This validates that a string value contains a valid resolvable v6 IP Address.
+
+ Usage: ip6_addr
+
+Unix domain socket end point Address
+
+This validates that a string value contains a valid Unix Address.
+
+ Usage: unix_addr
+
+Media Access Control Address MAC
+
+This validates that a string value contains a valid MAC Address.
+
+ Usage: mac
+
+Note: See Go's ParseMAC for accepted formats and types:
+
+ http://golang.org/src/net/mac.go?s=866:918#L29
+
+Hostname RFC 952
+
+This validates that a string value is a valid Hostname according to RFC 952 https://tools.ietf.org/html/rfc952
+
+ Usage: hostname
+
+Hostname RFC 1123
+
+This validates that a string value is a valid Hostname according to RFC 1123 https://tools.ietf.org/html/rfc1123
+
+ Usage: hostname_rfc1123 or if you want to continue to use 'hostname' in your tags, create an alias.
+
+Full Qualified Domain Name (FQDN)
+
+This validates that a string value contains a valid FQDN.
+
+ Usage: fqdn
+
+HTML Tags
+
+This validates that a string value appears to be an HTML element tag
+including those described at https://developer.mozilla.org/en-US/docs/Web/HTML/Element
+
+ Usage: html
+
+HTML Encoded
+
+This validates that a string value is a proper character reference in decimal
+or hexadecimal format
+
+ Usage: html_encoded
+
+URL Encoded
+
+This validates that a string value is percent-encoded (URL encoded) according
+to https://tools.ietf.org/html/rfc3986#section-2.1
+
+ Usage: url_encoded
+
+Directory
+
+This validates that a string value contains a valid directory and that
+it exists on the machine.
+This is done using os.Stat, which is a platform independent function.
+
+ Usage: dir
+
+HostPort
+
+This validates that a string value contains a valid DNS hostname and port that
+can be used to validate fields typically passed to sockets and connections.
+
+ Usage: hostname_port
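+
+For example, a minimal sketch (the struct is illustrative); a value such as
+"localhost:8080" passes, and per the implementation an empty host like ":8080"
+is also accepted:
+
+	type Server struct {
+		Addr string `validate:"hostname_port"`
+	}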
+
+Datetime
+
+This validates that a string value is a valid datetime based on the supplied datetime format.
+Supplied format must match the official Go time format layout as documented in https://golang.org/pkg/time/
+
+ Usage: datetime=2006-01-02
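+
+For example, a minimal sketch (the struct is illustrative):
+
+	type Event struct {
+		Date string `validate:"datetime=2006-01-02"` // e.g. "2020-12-31" passes
+	}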
+
+Iso3166-1 alpha-2
+
+This validates that a string value is a valid country code based on iso3166-1 alpha-2 standard.
+see: https://www.iso.org/iso-3166-country-codes.html
+
+ Usage: iso3166_1_alpha2
+
+Iso3166-1 alpha-3
+
+This validates that a string value is a valid country code based on iso3166-1 alpha-3 standard.
+see: https://www.iso.org/iso-3166-country-codes.html
+
+ Usage: iso3166_1_alpha3
+
+Iso3166-1 alpha-numeric
+
+This validates that a string value is a valid country code based on iso3166-1 alpha-numeric standard.
+see: https://www.iso.org/iso-3166-country-codes.html
+
+ Usage: iso3166_1_alpha_numeric
+
+TimeZone
+
+This validates that a string value is a valid time zone based on the time zone database present on the system.
+Although an empty value and the Local value are allowed by Go's time.LoadLocation function, they are not allowed by this validator.
+More information on https://golang.org/pkg/time/#LoadLocation
+
+ Usage: timezone
+
+
+Alias Validators and Tags
+
+NOTE: When returning an error, the tag returned in "FieldError" will be
+the alias tag unless the dive tag is part of the alias. Everything after the
+dive tag is not reported as the alias tag. Also, the "ActualTag" in the before
+case will be the actual tag within the alias that failed.
+
+Here is a list of the current built in alias tags:
+
+ "iscolor"
+ alias is "hexcolor|rgb|rgba|hsl|hsla" (Usage: iscolor)
+ "country_code"
+ alias is "iso3166_1_alpha2|iso3166_1_alpha3|iso3166_1_alpha_numeric" (Usage: country_code)
+
+Validator notes:
+
+ regex
+ a regex validator won't be added because commas and = signs can be part
+ of a regex which conflict with the validation definitions. Although
+ workarounds can be made, they take away from using pure regexes.
+ Furthermore, it's quick and dirty, but the regexes become harder to
+ maintain and are not reusable, so it's as much a programming philosophy
+ as anything.
+
+ In place of this, new validator functions should be created; a regex can
+ be used within the validator function and even be precompiled for better
+ efficiency within regexes.go.
+
+ And the best reason, you can submit a pull request and we can keep on
+ adding to the validation library of this package!
+
+Non standard validators
+
+A collection of validation rules that are frequently needed but are more
+complex than the ones found in the baked in validators.
+A non standard validator must be registered manually like you would
+with your own custom validation functions.
+
+Example of registration and use:
+
+ type Test struct {
+ TestField string `validate:"yourtag"`
+ }
+
+ t := &Test{
+ TestField: "Test"
+ }
+
+ validate := validator.New()
+ validate.RegisterValidation("yourtag", validators.NotBlank)
+
+Here is a list of the current non standard validators:
+
+ NotBlank
+ This validates that the value is not blank and not of zero length.
+ For strings ensures they do not contain only spaces. For channels, maps, slices and arrays
+ ensures they don't have zero length. For others, a non empty value is required.
+
+ Usage: notblank
+
+Panics
+
+This package panics when bad input is provided; this is by design, as bad code
+like that should not make it to production.
+
+ type Test struct {
+ TestField string `validate:"nonexistantfunction=1"`
+ }
+
+ t := &Test{
+ TestField: "Test"
+ }
+
+ validate.Struct(t) // this will panic
+*/
+package validator
diff --git a/vendor/github.com/go-playground/validator/v10/errors.go b/vendor/github.com/go-playground/validator/v10/errors.go
new file mode 100644
index 0000000..63293cf
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/errors.go
@@ -0,0 +1,275 @@
+package validator
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strings"
+
+ ut "github.com/go-playground/universal-translator"
+)
+
+const (
+ fieldErrMsg = "Key: '%s' Error:Field validation for '%s' failed on the '%s' tag"
+)
+
+// ValidationErrorsTranslations is the translation return type
+type ValidationErrorsTranslations map[string]string
+
+// InvalidValidationError describes an invalid argument passed to
+// `Struct`, `StructExcept`, `StructPartial` or `Field`
+type InvalidValidationError struct {
+ Type reflect.Type
+}
+
+// Error returns InvalidValidationError message
+func (e *InvalidValidationError) Error() string {
+
+ if e.Type == nil {
+ return "validator: (nil)"
+ }
+
+ return "validator: (nil " + e.Type.String() + ")"
+}
+
+// ValidationErrors is an array of FieldErrors
+// for use in custom error messages post validation.
+type ValidationErrors []FieldError
+
+// Error is intended for use in development + debugging and not intended to be a production error message.
+// It allows ValidationErrors to satisfy the error interface.
+// All information to create an error message specific to your application is contained within
+// the FieldError found within the ValidationErrors array
+func (ve ValidationErrors) Error() string {
+
+ buff := bytes.NewBufferString("")
+
+ var fe *fieldError
+
+ for i := 0; i < len(ve); i++ {
+
+ fe = ve[i].(*fieldError)
+ buff.WriteString(fe.Error())
+ buff.WriteString("\n")
+ }
+
+ return strings.TrimSpace(buff.String())
+}
+
+// Translate translates all of the ValidationErrors
+func (ve ValidationErrors) Translate(ut ut.Translator) ValidationErrorsTranslations {
+
+ trans := make(ValidationErrorsTranslations)
+
+ var fe *fieldError
+
+ for i := 0; i < len(ve); i++ {
+ fe = ve[i].(*fieldError)
+
+ // // in case an Anonymous struct was used, ensure that the key
+ // // would be 'Username' instead of ".Username"
+ // if len(fe.ns) > 0 && fe.ns[:1] == "." {
+ // trans[fe.ns[1:]] = fe.Translate(ut)
+ // continue
+ // }
+
+ trans[fe.ns] = fe.Translate(ut)
+ }
+
+ return trans
+}
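+
+// For example (illustrative), a caller with a ut.Translator `trans` and
+// registered translations could build a map of translated messages:
+//
+//  errs := err.(validator.ValidationErrors)
+//  for ns, msg := range errs.Translate(trans) {
+//   fmt.Println(ns, msg)
+//  }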
+
+// FieldError contains all functions to get error details
+type FieldError interface {
+
+ // returns the validation tag that failed. if the
+ // validation was an alias, this will return the
+ // alias name and not the underlying tag that failed.
+ //
+ // eg. alias "iscolor": "hexcolor|rgb|rgba|hsl|hsla"
+ // will return "iscolor"
+ Tag() string
+
+ // returns the validation tag that failed; even if an
+ // alias was used, the actual tag within the alias will be returned.
+ // If an 'or' validation fails, the entire or will be returned.
+ //
+ // eg. alias "iscolor": "hexcolor|rgb|rgba|hsl|hsla"
+ // will return "hexcolor|rgb|rgba|hsl|hsla"
+ ActualTag() string
+
+ // returns the namespace for the field error, with the tag
+ // name taking precedence over the field's actual name.
+ //
+ // eg. JSON name "User.fname"
+ //
+ // See StructNamespace() for a version that returns actual names.
+ //
+ // NOTE: this field can be blank when validating a single primitive field
+ // using validate.Field(...) as there is no way to extract its name
+ Namespace() string
+
+ // returns the namespace for the field error, with the field's
+ // actual name.
+ //
+ // eq. "User.FirstName" see Namespace for comparison
+ //
+ // NOTE: this field can be blank when validating a single primitive field
+ // using validate.Field(...) as there is no way to extract its name
+ StructNamespace() string
+
+ // returns the field's name with the tag name taking precedence over the
+ // field's actual name.
+ //
+ // eg. JSON name "fname"
+ // see StructField for comparison
+ Field() string
+
+ // returns the field's actual name from the struct, when able to determine.
+ //
+ // eq. "FirstName"
+ // see Field for comparison
+ StructField() string
+
+ // returns the actual field's value in case needed for creating the error
+ // message
+ Value() interface{}
+
+ // returns the param value, in string form for comparison; this will also
+ // help with generating an error message
+ Param() string
+
+ // Kind returns the Field's reflect Kind
+ //
+ // eg. time.Time's kind is a struct
+ Kind() reflect.Kind
+
+ // Type returns the Field's reflect Type
+ //
+ // eg. time.Time's type is time.Time
+ Type() reflect.Type
+
+ // returns the FieldError's translated error
+ // from the provided 'ut.Translator' and registered 'TranslationFunc'
+ //
+ // NOTE: if no registered translator can be found it returns the same as
+ // calling fe.Error()
+ Translate(ut ut.Translator) string
+
+ // Error returns the FieldError's message
+ Error() string
+}
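+
+// A minimal usage sketch (illustrative): callers typically assert the error
+// returned by Struct/Var and inspect each FieldError:
+//
+//  if err := validate.Struct(user); err != nil {
+//   for _, fe := range err.(validator.ValidationErrors) {
+//    fmt.Println(fe.Namespace(), fe.Tag(), fe.Param())
+//   }
+//  }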
+
+// compile time interface checks
+var _ FieldError = new(fieldError)
+var _ error = new(fieldError)
+
+// fieldError contains a single field's validation error along
+// with other properties that may be needed for error message creation
+// it complies with the FieldError interface
+type fieldError struct {
+ v *Validate
+ tag string
+ actualTag string
+ ns string
+ structNs string
+ fieldLen uint8
+ structfieldLen uint8
+ value interface{}
+ param string
+ kind reflect.Kind
+ typ reflect.Type
+}
+
+// Tag returns the validation tag that failed.
+func (fe *fieldError) Tag() string {
+ return fe.tag
+}
+
+// ActualTag returns the validation tag that failed; even if an
+// alias was used, the actual tag within the alias will be returned.
+func (fe *fieldError) ActualTag() string {
+ return fe.actualTag
+}
+
+// Namespace returns the namespace for the field error, with the tag
+// name taking precedence over the field's actual name.
+func (fe *fieldError) Namespace() string {
+ return fe.ns
+}
+
+// StructNamespace returns the namespace for the field error, with the field's
+// actual name.
+func (fe *fieldError) StructNamespace() string {
+ return fe.structNs
+}
+
+// Field returns the field's name with the tag name taking precedence over the
+// field's actual name.
+func (fe *fieldError) Field() string {
+
+ return fe.ns[len(fe.ns)-int(fe.fieldLen):]
+}
+
+// StructField returns the field's actual name from the struct, when able to determine.
+func (fe *fieldError) StructField() string {
+ return fe.structNs[len(fe.structNs)-int(fe.structfieldLen):]
+}
+
+// Value returns the actual field's value in case needed for creating the error
+// message
+func (fe *fieldError) Value() interface{} {
+ return fe.value
+}
+
+// Param returns the param value, in string form for comparison; this will
+// also help with generating an error message
+func (fe *fieldError) Param() string {
+ return fe.param
+}
+
+// Kind returns the Field's reflect Kind
+func (fe *fieldError) Kind() reflect.Kind {
+ return fe.kind
+}
+
+// Type returns the Field's reflect Type
+func (fe *fieldError) Type() reflect.Type {
+ return fe.typ
+}
+
+// Error returns the fieldError's error message
+func (fe *fieldError) Error() string {
+ return fmt.Sprintf(fieldErrMsg, fe.ns, fe.Field(), fe.tag)
+}
+
+// Translate returns the FieldError's translated error
+// from the provided 'ut.Translator' and registered 'TranslationFunc'
+//
+// NOTE: if no registered translation can be found, it returns the original
+// untranslated error message.
+func (fe *fieldError) Translate(ut ut.Translator) string {
+
+ m, ok := fe.v.transTagFunc[ut]
+ if !ok {
+ return fe.Error()
+ }
+
+ fn, ok := m[fe.tag]
+ if !ok {
+ return fe.Error()
+ }
+
+ return fn(ut, fe)
+}
diff --git a/vendor/github.com/go-playground/validator/v10/field_level.go b/vendor/github.com/go-playground/validator/v10/field_level.go
new file mode 100644
index 0000000..f0e2a9a
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/field_level.go
@@ -0,0 +1,119 @@
+package validator
+
+import "reflect"
+
+// FieldLevel contains all the information and helper functions
+// to validate a field
+type FieldLevel interface {
+ // returns the top level struct, if any
+ Top() reflect.Value
+
+ // returns the current field's parent struct, if any, or
+ // the comparison value if called via 'VarWithValue'
+ Parent() reflect.Value
+
+ // returns current field for validation
+ Field() reflect.Value
+
+ // returns the field's name with the tag
+ // name taking precedence over the field's actual name.
+ FieldName() string
+
+ // returns the struct field's name
+ StructFieldName() string
+
+ // returns param for validation against current field
+ Param() string
+
+ // GetTag returns the current validation's tag name
+ GetTag() string
+
+ // ExtractType gets the actual underlying type of field value.
+ // It will dive into pointers, customTypes and return you the
+ // underlying value and its kind.
+ ExtractType(field reflect.Value) (value reflect.Value, kind reflect.Kind, nullable bool)
+
+ // traverses the parent struct to retrieve a specific field denoted by the provided namespace
+ // in the param and returns the field, field kind and whether it was successful in retrieving
+ // the field at all.
+ //
+ // NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field
+ // could not be retrieved because it didn't exist.
+ //
+ // Deprecated: Use GetStructFieldOK2() instead which also returns whether the value is nullable.
+ GetStructFieldOK() (reflect.Value, reflect.Kind, bool)
+
+ // GetStructFieldOKAdvanced is the same as GetStructFieldOK except that it accepts the parent struct to start looking for
+ // the field and namespace allowing more extensibility for validators.
+ //
+ // Deprecated: Use GetStructFieldOKAdvanced2() instead which also returns whether the value is nullable.
+ GetStructFieldOKAdvanced(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool)
+
+ // traverses the parent struct to retrieve a specific field denoted by the provided namespace
+ // in the param and returns the field, field kind, whether it's a nullable type and whether it was successful in retrieving
+ // the field at all.
+ //
+ // NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field
+ // could not be retrieved because it didn't exist.
+ GetStructFieldOK2() (reflect.Value, reflect.Kind, bool, bool)
+
+ // GetStructFieldOKAdvanced2 is the same as GetStructFieldOK2 except that it accepts the parent struct to start looking for
+ // the field and namespace allowing more extensibility for validators.
+ GetStructFieldOKAdvanced2(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool, bool)
+}
+
+var _ FieldLevel = new(validate)
+
+// Field returns current field for validation
+func (v *validate) Field() reflect.Value {
+ return v.flField
+}
+
+// FieldName returns the field's name with the tag
+// name taking precedence over the field's actual name.
+func (v *validate) FieldName() string {
+ return v.cf.altName
+}
+
+// GetTag returns the current validation's tag name
+func (v *validate) GetTag() string {
+ return v.ct.tag
+}
+
+// StructFieldName returns the struct field's name
+func (v *validate) StructFieldName() string {
+ return v.cf.name
+}
+
+// Param returns param for validation against current field
+func (v *validate) Param() string {
+ return v.ct.param
+}
+
+// GetStructFieldOK traverses the parent struct to retrieve a specific field denoted by the
+// provided namespace in the param and returns the field, field kind and whether it was found.
+//
+// Deprecated: Use GetStructFieldOK2() instead which also returns whether the value is nullable.
+func (v *validate) GetStructFieldOK() (reflect.Value, reflect.Kind, bool) {
+ current, kind, _, found := v.getStructFieldOKInternal(v.slflParent, v.ct.param)
+ return current, kind, found
+}
+
+// GetStructFieldOKAdvanced is the same as GetStructFieldOK except that it accepts the parent struct to start looking for
+// the field and namespace allowing more extensibility for validators.
+//
+// Deprecated: Use GetStructFieldOKAdvanced2() instead which also returns whether the value is nullable.
+func (v *validate) GetStructFieldOKAdvanced(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool) {
+ current, kind, _, found := v.GetStructFieldOKAdvanced2(val, namespace)
+ return current, kind, found
+}
+
+// GetStructFieldOK2 traverses the parent struct to retrieve a specific field denoted by the provided
+// namespace in the param and returns the field, field kind, whether it's nullable and whether it was found.
+func (v *validate) GetStructFieldOK2() (reflect.Value, reflect.Kind, bool, bool) {
+ return v.getStructFieldOKInternal(v.slflParent, v.ct.param)
+}
+
+// GetStructFieldOKAdvanced2 is the same as GetStructFieldOK2 except that it accepts the parent struct to start looking for
+// the field and namespace allowing more extensibility for validators.
+func (v *validate) GetStructFieldOKAdvanced2(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool, bool) {
+ return v.getStructFieldOKInternal(val, namespace)
+}
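+
+// Illustrative sketch: a caller-registered cross-field rule using FieldLevel,
+// with a hypothetical tag name `gtefield2` (used as `validate:"gtefield2=OtherField"`)
+// and integer fields assumed:
+//
+//  _ = validate.RegisterValidation("gtefield2", func(fl validator.FieldLevel) bool {
+//   other, kind, _, found := fl.GetStructFieldOK2()
+//   if !found || kind != fl.Field().Kind() {
+//    return false
+//   }
+//   return fl.Field().Int() >= other.Int()
+//  })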
diff --git a/vendor/github.com/go-playground/validator/v10/go.mod b/vendor/github.com/go-playground/validator/v10/go.mod
new file mode 100644
index 0000000..d457100
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/go.mod
@@ -0,0 +1,11 @@
+module github.com/go-playground/validator/v10
+
+go 1.13
+
+require (
+ github.com/go-playground/assert/v2 v2.0.1
+ github.com/go-playground/locales v0.13.0
+ github.com/go-playground/universal-translator v0.17.0
+ github.com/leodido/go-urn v1.2.0
+ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
+)
diff --git a/vendor/github.com/go-playground/validator/v10/go.sum b/vendor/github.com/go-playground/validator/v10/go.sum
new file mode 100644
index 0000000..0152642
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/go.sum
@@ -0,0 +1,28 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
+github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
+github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
+github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no=
+github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
+github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
+github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/go-playground/validator/v10/logo.png b/vendor/github.com/go-playground/validator/v10/logo.png
new file mode 100644
index 0000000..355000f
Binary files /dev/null and b/vendor/github.com/go-playground/validator/v10/logo.png differ
diff --git a/vendor/github.com/go-playground/validator/v10/regexes.go b/vendor/github.com/go-playground/validator/v10/regexes.go
new file mode 100644
index 0000000..b741f4e
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/regexes.go
@@ -0,0 +1,101 @@
+package validator
+
+import "regexp"
+
+const (
+ alphaRegexString = "^[a-zA-Z]+$"
+ alphaNumericRegexString = "^[a-zA-Z0-9]+$"
+ alphaUnicodeRegexString = "^[\\p{L}]+$"
+ alphaUnicodeNumericRegexString = "^[\\p{L}\\p{N}]+$"
+ numericRegexString = "^[-+]?[0-9]+(?:\\.[0-9]+)?$"
+ numberRegexString = "^[0-9]+$"
+ hexadecimalRegexString = "^(0[xX])?[0-9a-fA-F]+$"
+ hexcolorRegexString = "^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
+ rgbRegexString = "^rgb\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*\\)$"
+ rgbaRegexString = "^rgba\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$"
+ hslRegexString = "^hsl\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*\\)$"
+ hslaRegexString = "^hsla\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$"
+ emailRegexString = "^(?:(?:(?:(?:[a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(?:\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|(?:(?:\\x22)(?:(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(?:\\x20|\\x09)+)?(?:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(\\x20|\\x09)+)?(?:\\x22))))@(?:(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
+ e164RegexString = "^\\+[1-9]?[0-9]{7,14}$"
+ base64RegexString = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
+ base64URLRegexString = "^(?:[A-Za-z0-9-_]{4})*(?:[A-Za-z0-9-_]{2}==|[A-Za-z0-9-_]{3}=|[A-Za-z0-9-_]{4})$"
+ iSBN10RegexString = "^(?:[0-9]{9}X|[0-9]{10})$"
+ iSBN13RegexString = "^(?:(?:97(?:8|9))[0-9]{10})$"
+ uUID3RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
+ uUID4RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
+ uUID5RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
+ uUIDRegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
+ uUID3RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-3[0-9a-fA-F]{3}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$"
+ uUID4RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-4[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$"
+ uUID5RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-5[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$"
+ uUIDRFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$"
+ aSCIIRegexString = "^[\x00-\x7F]*$"
+ printableASCIIRegexString = "^[\x20-\x7E]*$"
+ multibyteRegexString = "[^\x00-\x7F]"
+ dataURIRegexString = `^data:((?:\w+\/(?:([^;]|;[^;]).)+)?)`
+ latitudeRegexString = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
+ longitudeRegexString = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
+ sSNRegexString = `^[0-9]{3}[ -]?(0[1-9]|[1-9][0-9])[ -]?([1-9][0-9]{3}|[0-9][1-9][0-9]{2}|[0-9]{2}[1-9][0-9]|[0-9]{3}[1-9])$`
+ hostnameRegexStringRFC952 = `^[a-zA-Z]([a-zA-Z0-9\-]+[\.]?)*[a-zA-Z0-9]$` // https://tools.ietf.org/html/rfc952
+ hostnameRegexStringRFC1123 = `^([a-zA-Z0-9]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*?$` // accepts hostname starting with a digit https://tools.ietf.org/html/rfc1123
+ fqdnRegexStringRFC1123 = `^([a-zA-Z0-9]{1}[a-zA-Z0-9_-]{0,62})(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*?(\.[a-zA-Z]{1}[a-zA-Z0-9]{0,62})\.?$` // same as hostnameRegexStringRFC1123 but must contain a non numerical TLD (possibly ending with '.')
+ btcAddressRegexString = `^[13][a-km-zA-HJ-NP-Z1-9]{25,34}$` // bitcoin address
+ btcAddressUpperRegexStringBech32 = `^BC1[02-9AC-HJ-NP-Z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32
+ btcAddressLowerRegexStringBech32 = `^bc1[02-9ac-hj-np-z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32
+ ethAddressRegexString = `^0x[0-9a-fA-F]{40}$`
+ ethAddressUpperRegexString = `^0x[0-9A-F]{40}$`
+ ethAddressLowerRegexString = `^0x[0-9a-f]{40}$`
+ uRLEncodedRegexString = `(%[A-Fa-f0-9]{2})`
+ hTMLEncodedRegexString = `&#[x]?([0-9a-fA-F]{2})|(&gt)|(&lt)|(&quot)|(&amp)+[;]?`
+ hTMLRegexString = `<[/]?([a-zA-Z]+).*?>`
+ splitParamsRegexString = `'[^']*'|\S+`
+)
+
+var (
+ alphaRegex = regexp.MustCompile(alphaRegexString)
+ alphaNumericRegex = regexp.MustCompile(alphaNumericRegexString)
+ alphaUnicodeRegex = regexp.MustCompile(alphaUnicodeRegexString)
+ alphaUnicodeNumericRegex = regexp.MustCompile(alphaUnicodeNumericRegexString)
+ numericRegex = regexp.MustCompile(numericRegexString)
+ numberRegex = regexp.MustCompile(numberRegexString)
+ hexadecimalRegex = regexp.MustCompile(hexadecimalRegexString)
+ hexcolorRegex = regexp.MustCompile(hexcolorRegexString)
+ rgbRegex = regexp.MustCompile(rgbRegexString)
+ rgbaRegex = regexp.MustCompile(rgbaRegexString)
+ hslRegex = regexp.MustCompile(hslRegexString)
+ hslaRegex = regexp.MustCompile(hslaRegexString)
+ e164Regex = regexp.MustCompile(e164RegexString)
+ emailRegex = regexp.MustCompile(emailRegexString)
+ base64Regex = regexp.MustCompile(base64RegexString)
+ base64URLRegex = regexp.MustCompile(base64URLRegexString)
+ iSBN10Regex = regexp.MustCompile(iSBN10RegexString)
+ iSBN13Regex = regexp.MustCompile(iSBN13RegexString)
+ uUID3Regex = regexp.MustCompile(uUID3RegexString)
+ uUID4Regex = regexp.MustCompile(uUID4RegexString)
+ uUID5Regex = regexp.MustCompile(uUID5RegexString)
+ uUIDRegex = regexp.MustCompile(uUIDRegexString)
+ uUID3RFC4122Regex = regexp.MustCompile(uUID3RFC4122RegexString)
+ uUID4RFC4122Regex = regexp.MustCompile(uUID4RFC4122RegexString)
+ uUID5RFC4122Regex = regexp.MustCompile(uUID5RFC4122RegexString)
+ uUIDRFC4122Regex = regexp.MustCompile(uUIDRFC4122RegexString)
+ aSCIIRegex = regexp.MustCompile(aSCIIRegexString)
+ printableASCIIRegex = regexp.MustCompile(printableASCIIRegexString)
+ multibyteRegex = regexp.MustCompile(multibyteRegexString)
+ dataURIRegex = regexp.MustCompile(dataURIRegexString)
+ latitudeRegex = regexp.MustCompile(latitudeRegexString)
+ longitudeRegex = regexp.MustCompile(longitudeRegexString)
+ sSNRegex = regexp.MustCompile(sSNRegexString)
+ hostnameRegexRFC952 = regexp.MustCompile(hostnameRegexStringRFC952)
+ hostnameRegexRFC1123 = regexp.MustCompile(hostnameRegexStringRFC1123)
+ fqdnRegexRFC1123 = regexp.MustCompile(fqdnRegexStringRFC1123)
+ btcAddressRegex = regexp.MustCompile(btcAddressRegexString)
+ btcUpperAddressRegexBech32 = regexp.MustCompile(btcAddressUpperRegexStringBech32)
+ btcLowerAddressRegexBech32 = regexp.MustCompile(btcAddressLowerRegexStringBech32)
+ ethAddressRegex = regexp.MustCompile(ethAddressRegexString)
+ ethaddressRegexUpper = regexp.MustCompile(ethAddressUpperRegexString)
+ ethAddressRegexLower = regexp.MustCompile(ethAddressLowerRegexString)
+ uRLEncodedRegex = regexp.MustCompile(uRLEncodedRegexString)
+ hTMLEncodedRegex = regexp.MustCompile(hTMLEncodedRegexString)
+ hTMLRegex = regexp.MustCompile(hTMLRegexString)
+ splitParamsRegex = regexp.MustCompile(splitParamsRegexString)
+)
diff --git a/vendor/github.com/go-playground/validator/v10/struct_level.go b/vendor/github.com/go-playground/validator/v10/struct_level.go
new file mode 100644
index 0000000..57691ee
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/struct_level.go
@@ -0,0 +1,175 @@
+package validator
+
+import (
+ "context"
+ "reflect"
+)
+
+// StructLevelFunc accepts all values needed for struct level validation
+type StructLevelFunc func(sl StructLevel)
+
+// StructLevelFuncCtx accepts all values needed for struct level validation
+// but also allows passing of contextual validation information via context.Context.
+type StructLevelFuncCtx func(ctx context.Context, sl StructLevel)
+
+// wrapStructLevelFunc wraps a normal StructLevelFunc, making it compatible with StructLevelFuncCtx
+func wrapStructLevelFunc(fn StructLevelFunc) StructLevelFuncCtx {
+ return func(ctx context.Context, sl StructLevel) {
+ fn(sl)
+ }
+}
+
+// StructLevel contains all the information and helper functions
+// to validate a struct
+type StructLevel interface {
+
+ // returns the main validation object, in case one wants to call validations internally.
+ // this is so you don't have to use anonymous functions to get access to the validate
+ // instance.
+ Validator() *Validate
+
+ // returns the top level struct, if any
+ Top() reflect.Value
+
+ // returns the current fields parent struct, if any
+ Parent() reflect.Value
+
+ // returns the current struct.
+ Current() reflect.Value
+
+ // ExtractType gets the actual underlying type of field value.
+ // It will dive into pointers, customTypes and return you the
+ // underlying value and its kind.
+ ExtractType(field reflect.Value) (value reflect.Value, kind reflect.Kind, nullable bool)
+
+ // reports an error just by passing the field and tag information
+ //
+ // NOTES:
+ //
+ // fieldName and altName get appended to the existing namespace that
+ // validator is on. e.g. pass 'FirstName' or 'Names[0]' depending
+ // on the nesting
+ //
+ // tag can be an existing validation tag or just something you make up
+ // and process on the flip side; it's up to you.
+ ReportError(field interface{}, fieldName, structFieldName string, tag, param string)
+
+ // reports an error just by passing ValidationErrors
+ //
+ // NOTES:
+ //
+ // relativeNamespace and relativeActualNamespace get appended to the
+ // existing namespace that validator is on.
+ // e.g. pass 'User.FirstName' or 'Users[0].FirstName' depending
+ // on the nesting. most of the time they will be blank, unless you validate
+ // at a level lower than the current field depth
+ ReportValidationErrors(relativeNamespace, relativeActualNamespace string, errs ValidationErrors)
+}
+
+var _ StructLevel = new(validate)
+
+// Top returns the top level struct
+//
+// NOTE: this can be the same as the current struct being validated
+// if it is not a nested struct.
+//
+// this is only called when within Struct and Field Level validation and
+// should not be relied upon for an accurate value otherwise.
+func (v *validate) Top() reflect.Value {
+ return v.top
+}
+
+// Parent returns the current struct's parent
+//
+// NOTE: this can be the same as the current struct being validated
+// if it is not a nested struct.
+//
+// this is only called when within Struct and Field Level validation and
+// should not be relied upon for an accurate value otherwise.
+func (v *validate) Parent() reflect.Value {
+ return v.slflParent
+}
+
+// Current returns the current struct.
+func (v *validate) Current() reflect.Value {
+ return v.slCurrent
+}
+
+// Validator returns the main validation object, in case one wants to call validations internally.
+func (v *validate) Validator() *Validate {
+ return v.v
+}
+
+// ExtractType gets the actual underlying type of field value.
+func (v *validate) ExtractType(field reflect.Value) (reflect.Value, reflect.Kind, bool) {
+ return v.extractTypeInternal(field, false)
+}
+
+// ReportError reports an error just by passing the field and tag information
+func (v *validate) ReportError(field interface{}, fieldName, structFieldName, tag, param string) {
+
+ fv, kind, _ := v.extractTypeInternal(reflect.ValueOf(field), false)
+
+ if len(structFieldName) == 0 {
+ structFieldName = fieldName
+ }
+
+ v.str1 = string(append(v.ns, fieldName...))
+
+ if v.v.hasTagNameFunc || fieldName != structFieldName {
+ v.str2 = string(append(v.actualNs, structFieldName...))
+ } else {
+ v.str2 = v.str1
+ }
+
+ if kind == reflect.Invalid {
+
+ v.errs = append(v.errs,
+ &fieldError{
+ v: v.v,
+ tag: tag,
+ actualTag: tag,
+ ns: v.str1,
+ structNs: v.str2,
+ fieldLen: uint8(len(fieldName)),
+ structfieldLen: uint8(len(structFieldName)),
+ param: param,
+ kind: kind,
+ },
+ )
+ return
+ }
+
+ v.errs = append(v.errs,
+ &fieldError{
+ v: v.v,
+ tag: tag,
+ actualTag: tag,
+ ns: v.str1,
+ structNs: v.str2,
+ fieldLen: uint8(len(fieldName)),
+ structfieldLen: uint8(len(structFieldName)),
+ value: fv.Interface(),
+ param: param,
+ kind: kind,
+ typ: fv.Type(),
+ },
+ )
+}
+
+// ReportValidationErrors reports ValidationErrors obtained from running validations within the Struct Level validation.
+//
+// NOTE: this function prepends the current namespace to the relative ones.
+func (v *validate) ReportValidationErrors(relativeNamespace, relativeStructNamespace string, errs ValidationErrors) {
+
+ var err *fieldError
+
+ for i := 0; i < len(errs); i++ {
+
+ err = errs[i].(*fieldError)
+ err.ns = string(append(append(v.ns, relativeNamespace...), err.ns...))
+ err.structNs = string(append(append(v.actualNs, relativeStructNamespace...), err.structNs...))
+
+ v.errs = append(v.errs, err)
+ }
+}
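+
+// Illustrative sketch: a caller-registered struct level validation, assuming
+// a User struct with FirstName and LastName fields and a made-up tag name
+// `fnameorlname`:
+//
+//  validate.RegisterStructValidation(func(sl validator.StructLevel) {
+//   u := sl.Current().Interface().(User)
+//   if len(u.FirstName) == 0 && len(u.LastName) == 0 {
+//    sl.ReportError(u.FirstName, "FirstName", "FirstName", "fnameorlname", "")
+//   }
+//  }, User{})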
diff --git a/vendor/github.com/go-playground/validator/v10/translations.go b/vendor/github.com/go-playground/validator/v10/translations.go
new file mode 100644
index 0000000..4d9d75c
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/translations.go
@@ -0,0 +1,11 @@
+package validator
+
+import ut "github.com/go-playground/universal-translator"
+
+// TranslationFunc is the function type used to register or override
+// custom translations
+type TranslationFunc func(ut ut.Translator, fe FieldError) string
+
+// RegisterTranslationsFunc allows for registering of translations
+// for a 'ut.Translator' for use within the 'TranslationFunc'
+type RegisterTranslationsFunc func(ut ut.Translator) error
diff --git a/vendor/github.com/go-playground/validator/v10/util.go b/vendor/github.com/go-playground/validator/v10/util.go
new file mode 100644
index 0000000..56420f4
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/util.go
@@ -0,0 +1,288 @@
+package validator
+
+import (
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// extractTypeInternal gets the actual underlying type of field value.
+// It will dive into pointers, customTypes and return you the
+// underlying value and its kind.
+func (v *validate) extractTypeInternal(current reflect.Value, nullable bool) (reflect.Value, reflect.Kind, bool) {
+
+BEGIN:
+ switch current.Kind() {
+ case reflect.Ptr:
+
+ nullable = true
+
+ if current.IsNil() {
+ return current, reflect.Ptr, nullable
+ }
+
+ current = current.Elem()
+ goto BEGIN
+
+ case reflect.Interface:
+
+ nullable = true
+
+ if current.IsNil() {
+ return current, reflect.Interface, nullable
+ }
+
+ current = current.Elem()
+ goto BEGIN
+
+ case reflect.Invalid:
+ return current, reflect.Invalid, nullable
+
+ default:
+
+ if v.v.hasCustomFuncs {
+
+ if fn, ok := v.v.customFuncs[current.Type()]; ok {
+ current = reflect.ValueOf(fn(current))
+ goto BEGIN
+ }
+ }
+
+ return current, current.Kind(), nullable
+ }
+}
+
+// getStructFieldOKInternal traverses a struct to retrieve a specific field denoted by the provided namespace and
+// returns the field, field kind and whether it was successful in retrieving the field at all.
+//
+// NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field
+// could not be retrieved because it didn't exist.
+func (v *validate) getStructFieldOKInternal(val reflect.Value, namespace string) (current reflect.Value, kind reflect.Kind, nullable bool, found bool) {
+
+BEGIN:
+ current, kind, nullable = v.ExtractType(val)
+ if kind == reflect.Invalid {
+ return
+ }
+
+ if namespace == "" {
+ found = true
+ return
+ }
+
+ switch kind {
+
+ case reflect.Ptr, reflect.Interface:
+ return
+
+ case reflect.Struct:
+
+ typ := current.Type()
+ fld := namespace
+ var ns string
+
+ if typ != timeType {
+
+ idx := strings.Index(namespace, namespaceSeparator)
+
+ if idx != -1 {
+ fld = namespace[:idx]
+ ns = namespace[idx+1:]
+ } else {
+ ns = ""
+ }
+
+ bracketIdx := strings.Index(fld, leftBracket)
+ if bracketIdx != -1 {
+ fld = fld[:bracketIdx]
+
+ ns = namespace[bracketIdx:]
+ }
+
+ val = current.FieldByName(fld)
+ namespace = ns
+ goto BEGIN
+ }
+
+ case reflect.Array, reflect.Slice:
+ idx := strings.Index(namespace, leftBracket)
+ idx2 := strings.Index(namespace, rightBracket)
+
+ arrIdx, _ := strconv.Atoi(namespace[idx+1 : idx2])
+
+ if arrIdx >= current.Len() {
+ return
+ }
+
+ startIdx := idx2 + 1
+
+ if startIdx < len(namespace) {
+ if namespace[startIdx:startIdx+1] == namespaceSeparator {
+ startIdx++
+ }
+ }
+
+ val = current.Index(arrIdx)
+ namespace = namespace[startIdx:]
+ goto BEGIN
+
+ case reflect.Map:
+ idx := strings.Index(namespace, leftBracket) + 1
+ idx2 := strings.Index(namespace, rightBracket)
+
+ endIdx := idx2
+
+ if endIdx+1 < len(namespace) {
+ if namespace[endIdx+1:endIdx+2] == namespaceSeparator {
+ endIdx++
+ }
+ }
+
+ key := namespace[idx:idx2]
+
+ switch current.Type().Key().Kind() {
+ case reflect.Int:
+ i, _ := strconv.Atoi(key)
+ val = current.MapIndex(reflect.ValueOf(i))
+ namespace = namespace[endIdx+1:]
+
+ case reflect.Int8:
+ i, _ := strconv.ParseInt(key, 10, 8)
+ val = current.MapIndex(reflect.ValueOf(int8(i)))
+ namespace = namespace[endIdx+1:]
+
+ case reflect.Int16:
+ i, _ := strconv.ParseInt(key, 10, 16)
+ val = current.MapIndex(reflect.ValueOf(int16(i)))
+ namespace = namespace[endIdx+1:]
+
+ case reflect.Int32:
+ i, _ := strconv.ParseInt(key, 10, 32)
+ val = current.MapIndex(reflect.ValueOf(int32(i)))
+ namespace = namespace[endIdx+1:]
+
+ case reflect.Int64:
+ i, _ := strconv.ParseInt(key, 10, 64)
+ val = current.MapIndex(reflect.ValueOf(i))
+ namespace = namespace[endIdx+1:]
+
+ case reflect.Uint:
+ i, _ := strconv.ParseUint(key, 10, 0)
+ val = current.MapIndex(reflect.ValueOf(uint(i)))
+ namespace = namespace[endIdx+1:]
+
+ case reflect.Uint8:
+ i, _ := strconv.ParseUint(key, 10, 8)
+ val = current.MapIndex(reflect.ValueOf(uint8(i)))
+ namespace = namespace[endIdx+1:]
+
+ case reflect.Uint16:
+ i, _ := strconv.ParseUint(key, 10, 16)
+ val = current.MapIndex(reflect.ValueOf(uint16(i)))
+ namespace = namespace[endIdx+1:]
+
+ case reflect.Uint32:
+ i, _ := strconv.ParseUint(key, 10, 32)
+ val = current.MapIndex(reflect.ValueOf(uint32(i)))
+ namespace = namespace[endIdx+1:]
+
+ case reflect.Uint64:
+ i, _ := strconv.ParseUint(key, 10, 64)
+ val = current.MapIndex(reflect.ValueOf(i))
+ namespace = namespace[endIdx+1:]
+
+ case reflect.Float32:
+ f, _ := strconv.ParseFloat(key, 32)
+ val = current.MapIndex(reflect.ValueOf(float32(f)))
+ namespace = namespace[endIdx+1:]
+
+ case reflect.Float64:
+ f, _ := strconv.ParseFloat(key, 64)
+ val = current.MapIndex(reflect.ValueOf(f))
+ namespace = namespace[endIdx+1:]
+
+ case reflect.Bool:
+ b, _ := strconv.ParseBool(key)
+ val = current.MapIndex(reflect.ValueOf(b))
+ namespace = namespace[endIdx+1:]
+
+ // reflect.Type = string
+ default:
+ val = current.MapIndex(reflect.ValueOf(key))
+ namespace = namespace[endIdx+1:]
+ }
+
+ goto BEGIN
+ }
+
+ // if we got here there was more namespace, but we cannot go any deeper
+ panic("Invalid field namespace")
+}
+
+// asInt returns the parameter as an int64
+// or panics if it can't convert
+func asInt(param string) int64 {
+ i, err := strconv.ParseInt(param, 0, 64)
+ panicIf(err)
+
+ return i
+}
+
+// asIntFromTimeDuration parses param as time.Duration and returns it as int64
+// or panics on error.
+func asIntFromTimeDuration(param string) int64 {
+ d, err := time.ParseDuration(param)
+ if err != nil {
+ // attempt parsing as an integer, assuming nanosecond precision
+ return asInt(param)
+ }
+ return int64(d)
+}
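+
+// For example (illustrative): with a tag param such as `max=2h30m` on a
+// time.Duration field, the param is parsed via time.ParseDuration above,
+// while a plain integer param such as `max=100` falls back to asInt.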
+
+// asIntFromType calls the proper function to parse param as int64,
+// given a field's Type t.
+func asIntFromType(t reflect.Type, param string) int64 {
+ switch t {
+ case timeDurationType:
+ return asIntFromTimeDuration(param)
+ default:
+ return asInt(param)
+ }
+}
+
+// asUint returns the parameter as a uint64
+// or panics if it can't convert
+func asUint(param string) uint64 {
+
+ i, err := strconv.ParseUint(param, 0, 64)
+ panicIf(err)
+
+ return i
+}
+
+// asFloat returns the parameter as a float64
+// or panics if it can't convert
+func asFloat(param string) float64 {
+
+ i, err := strconv.ParseFloat(param, 64)
+ panicIf(err)
+
+ return i
+}
+
+// asBool returns the parameter as a bool
+// or panics if it can't convert
+func asBool(param string) bool {
+
+ i, err := strconv.ParseBool(param)
+ panicIf(err)
+
+ return i
+}
+
+func panicIf(err error) {
+ if err != nil {
+ panic(err.Error())
+ }
+}
diff --git a/vendor/github.com/go-playground/validator/v10/validator.go b/vendor/github.com/go-playground/validator/v10/validator.go
new file mode 100644
index 0000000..f097f39
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/validator.go
@@ -0,0 +1,477 @@
+package validator
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "strconv"
+)
+
+// per validate construct
+type validate struct {
+ v *Validate
+ top reflect.Value
+ ns []byte
+ actualNs []byte
+ errs ValidationErrors
+ includeExclude map[string]struct{} // reset only if StructPartial or StructExcept are called, no need otherwise
+ ffn FilterFunc
+ slflParent reflect.Value // StructLevel & FieldLevel
+ slCurrent reflect.Value // StructLevel & FieldLevel
+ flField reflect.Value // StructLevel & FieldLevel
+ cf *cField // StructLevel & FieldLevel
+ ct *cTag // StructLevel & FieldLevel
+ misc []byte // misc reusable
+ str1 string // misc reusable
+ str2 string // misc reusable
+ fldIsPointer bool // StructLevel & FieldLevel
+ isPartial bool
+ hasExcludes bool
+}
+
+// parent and current will be the same on the first run of validateStruct
+func (v *validate) validateStruct(ctx context.Context, parent reflect.Value, current reflect.Value, typ reflect.Type, ns []byte, structNs []byte, ct *cTag) {
+
+ cs, ok := v.v.structCache.Get(typ)
+ if !ok {
+ cs = v.v.extractStructCache(current, typ.Name())
+ }
+
+ if len(ns) == 0 && len(cs.name) != 0 {
+
+ ns = append(ns, cs.name...)
+ ns = append(ns, '.')
+
+ structNs = append(structNs, cs.name...)
+ structNs = append(structNs, '.')
+ }
+
+ // ct is nil on top level struct, and structs as fields that have no tag info
+ // so if nil or if not nil and the structonly tag isn't present
+ if ct == nil || ct.typeof != typeStructOnly {
+
+ var f *cField
+
+ for i := 0; i < len(cs.fields); i++ {
+
+ f = cs.fields[i]
+
+ if v.isPartial {
+
+ if v.ffn != nil {
+ // used with StructFiltered
+ if v.ffn(append(structNs, f.name...)) {
+ continue
+ }
+
+ } else {
+ // used with StructPartial & StructExcept
+ _, ok = v.includeExclude[string(append(structNs, f.name...))]
+
+ if (ok && v.hasExcludes) || (!ok && !v.hasExcludes) {
+ continue
+ }
+ }
+ }
+
+ v.traverseField(ctx, parent, current.Field(f.idx), ns, structNs, f, f.cTags)
+ }
+ }
+
+ // check for any struct level validations after all field validations have been checked.
+ // the first iteration will have no info about the nostructlevel tag, and is checked prior to
+ // calling the next iteration of validateStruct called from traverseField.
+ if cs.fn != nil {
+
+ v.slflParent = parent
+ v.slCurrent = current
+ v.ns = ns
+ v.actualNs = structNs
+
+ cs.fn(ctx, v)
+ }
+}
+
+// traverseField validates any field, be it a struct or single field, ensures its validity and passes it along to be validated via its tag options
+func (v *validate) traverseField(ctx context.Context, parent reflect.Value, current reflect.Value, ns []byte, structNs []byte, cf *cField, ct *cTag) {
+ var typ reflect.Type
+ var kind reflect.Kind
+
+ current, kind, v.fldIsPointer = v.extractTypeInternal(current, false)
+
+ switch kind {
+ case reflect.Ptr, reflect.Interface, reflect.Invalid:
+
+ if ct == nil {
+ return
+ }
+
+ if ct.typeof == typeOmitEmpty || ct.typeof == typeIsDefault {
+ return
+ }
+
+ if ct.hasTag {
+ if kind == reflect.Invalid {
+ v.str1 = string(append(ns, cf.altName...))
+ if v.v.hasTagNameFunc {
+ v.str2 = string(append(structNs, cf.name...))
+ } else {
+ v.str2 = v.str1
+ }
+ v.errs = append(v.errs,
+ &fieldError{
+ v: v.v,
+ tag: ct.aliasTag,
+ actualTag: ct.tag,
+ ns: v.str1,
+ structNs: v.str2,
+ fieldLen: uint8(len(cf.altName)),
+ structfieldLen: uint8(len(cf.name)),
+ param: ct.param,
+ kind: kind,
+ },
+ )
+ return
+ }
+
+ v.str1 = string(append(ns, cf.altName...))
+ if v.v.hasTagNameFunc {
+ v.str2 = string(append(structNs, cf.name...))
+ } else {
+ v.str2 = v.str1
+ }
+ if !ct.runValidationWhenNil {
+ v.errs = append(v.errs,
+ &fieldError{
+ v: v.v,
+ tag: ct.aliasTag,
+ actualTag: ct.tag,
+ ns: v.str1,
+ structNs: v.str2,
+ fieldLen: uint8(len(cf.altName)),
+ structfieldLen: uint8(len(cf.name)),
+ value: current.Interface(),
+ param: ct.param,
+ kind: kind,
+ typ: current.Type(),
+ },
+ )
+ return
+ }
+ }
+
+ case reflect.Struct:
+
+ typ = current.Type()
+
+ if typ != timeType {
+
+ if ct != nil {
+
+ if ct.typeof == typeStructOnly {
+ goto CONTINUE
+ } else if ct.typeof == typeIsDefault {
+ // set Field Level fields
+ v.slflParent = parent
+ v.flField = current
+ v.cf = cf
+ v.ct = ct
+
+ if !ct.fn(ctx, v) {
+ v.str1 = string(append(ns, cf.altName...))
+
+ if v.v.hasTagNameFunc {
+ v.str2 = string(append(structNs, cf.name...))
+ } else {
+ v.str2 = v.str1
+ }
+
+ v.errs = append(v.errs,
+ &fieldError{
+ v: v.v,
+ tag: ct.aliasTag,
+ actualTag: ct.tag,
+ ns: v.str1,
+ structNs: v.str2,
+ fieldLen: uint8(len(cf.altName)),
+ structfieldLen: uint8(len(cf.name)),
+ value: current.Interface(),
+ param: ct.param,
+ kind: kind,
+ typ: typ,
+ },
+ )
+ return
+ }
+ }
+
+ ct = ct.next
+ }
+
+ if ct != nil && ct.typeof == typeNoStructLevel {
+ return
+ }
+
+ CONTINUE:
+ // if len == 0 then validating using 'Var' or 'VarWithValue'
+ // Var - doesn't make much sense to do it that way, should call 'Struct', but no harm...
+ // VarWithValue - this allows for validating each field within the struct against a specific value
+ // pretty handy in certain situations
+ if len(cf.name) > 0 {
+ ns = append(append(ns, cf.altName...), '.')
+ structNs = append(append(structNs, cf.name...), '.')
+ }
+
+ v.validateStruct(ctx, current, current, typ, ns, structNs, ct)
+ return
+ }
+ }
+
+ if !ct.hasTag {
+ return
+ }
+
+ typ = current.Type()
+
+OUTER:
+ for {
+ if ct == nil {
+ return
+ }
+
+ switch ct.typeof {
+
+ case typeOmitEmpty:
+
+ // set Field Level fields
+ v.slflParent = parent
+ v.flField = current
+ v.cf = cf
+ v.ct = ct
+
+ if !hasValue(v) {
+ return
+ }
+
+ ct = ct.next
+ continue
+
+ case typeEndKeys:
+ return
+
+ case typeDive:
+
+ ct = ct.next
+
+ // traverse slice or map here
+ // or panic ;)
+ switch kind {
+ case reflect.Slice, reflect.Array:
+
+ var i64 int64
+ reusableCF := &cField{}
+
+ for i := 0; i < current.Len(); i++ {
+
+ i64 = int64(i)
+
+ v.misc = append(v.misc[0:0], cf.name...)
+ v.misc = append(v.misc, '[')
+ v.misc = strconv.AppendInt(v.misc, i64, 10)
+ v.misc = append(v.misc, ']')
+
+ reusableCF.name = string(v.misc)
+
+ if cf.namesEqual {
+ reusableCF.altName = reusableCF.name
+ } else {
+
+ v.misc = append(v.misc[0:0], cf.altName...)
+ v.misc = append(v.misc, '[')
+ v.misc = strconv.AppendInt(v.misc, i64, 10)
+ v.misc = append(v.misc, ']')
+
+ reusableCF.altName = string(v.misc)
+ }
+ v.traverseField(ctx, parent, current.Index(i), ns, structNs, reusableCF, ct)
+ }
+
+ case reflect.Map:
+
+ var pv string
+ reusableCF := &cField{}
+
+ for _, key := range current.MapKeys() {
+
+ pv = fmt.Sprintf("%v", key.Interface())
+
+ v.misc = append(v.misc[0:0], cf.name...)
+ v.misc = append(v.misc, '[')
+ v.misc = append(v.misc, pv...)
+ v.misc = append(v.misc, ']')
+
+ reusableCF.name = string(v.misc)
+
+ if cf.namesEqual {
+ reusableCF.altName = reusableCF.name
+ } else {
+ v.misc = append(v.misc[0:0], cf.altName...)
+ v.misc = append(v.misc, '[')
+ v.misc = append(v.misc, pv...)
+ v.misc = append(v.misc, ']')
+
+ reusableCF.altName = string(v.misc)
+ }
+
+ if ct != nil && ct.typeof == typeKeys && ct.keys != nil {
+ v.traverseField(ctx, parent, key, ns, structNs, reusableCF, ct.keys)
+ // can be nil when just keys being validated
+ if ct.next != nil {
+ v.traverseField(ctx, parent, current.MapIndex(key), ns, structNs, reusableCF, ct.next)
+ }
+ } else {
+ v.traverseField(ctx, parent, current.MapIndex(key), ns, structNs, reusableCF, ct)
+ }
+ }
+
+ default:
+ // throw error, if not a slice or map then should not have gotten here
+ // bad dive tag
+ panic("dive error! can't dive on a non slice or map")
+ }
+
+ return
+
+ case typeOr:
+
+ v.misc = v.misc[0:0]
+
+ for {
+
+ // set Field Level fields
+ v.slflParent = parent
+ v.flField = current
+ v.cf = cf
+ v.ct = ct
+
+ if ct.fn(ctx, v) {
+
+ // drain rest of the 'or' values, then continue or leave
+ for {
+
+ ct = ct.next
+
+ if ct == nil {
+ return
+ }
+
+ if ct.typeof != typeOr {
+ continue OUTER
+ }
+ }
+ }
+
+ v.misc = append(v.misc, '|')
+ v.misc = append(v.misc, ct.tag...)
+
+ if ct.hasParam {
+ v.misc = append(v.misc, '=')
+ v.misc = append(v.misc, ct.param...)
+ }
+
+ if ct.isBlockEnd || ct.next == nil {
+ // if we get here, no valid 'or' value and no more tags
+ v.str1 = string(append(ns, cf.altName...))
+
+ if v.v.hasTagNameFunc {
+ v.str2 = string(append(structNs, cf.name...))
+ } else {
+ v.str2 = v.str1
+ }
+
+ if ct.hasAlias {
+
+ v.errs = append(v.errs,
+ &fieldError{
+ v: v.v,
+ tag: ct.aliasTag,
+ actualTag: ct.actualAliasTag,
+ ns: v.str1,
+ structNs: v.str2,
+ fieldLen: uint8(len(cf.altName)),
+ structfieldLen: uint8(len(cf.name)),
+ value: current.Interface(),
+ param: ct.param,
+ kind: kind,
+ typ: typ,
+ },
+ )
+
+ } else {
+
+ tVal := string(v.misc)[1:]
+
+ v.errs = append(v.errs,
+ &fieldError{
+ v: v.v,
+ tag: tVal,
+ actualTag: tVal,
+ ns: v.str1,
+ structNs: v.str2,
+ fieldLen: uint8(len(cf.altName)),
+ structfieldLen: uint8(len(cf.name)),
+ value: current.Interface(),
+ param: ct.param,
+ kind: kind,
+ typ: typ,
+ },
+ )
+ }
+
+ return
+ }
+
+ ct = ct.next
+ }
+
+ default:
+
+ // set Field Level fields
+ v.slflParent = parent
+ v.flField = current
+ v.cf = cf
+ v.ct = ct
+
+ if !ct.fn(ctx, v) {
+
+ v.str1 = string(append(ns, cf.altName...))
+
+ if v.v.hasTagNameFunc {
+ v.str2 = string(append(structNs, cf.name...))
+ } else {
+ v.str2 = v.str1
+ }
+
+ v.errs = append(v.errs,
+ &fieldError{
+ v: v.v,
+ tag: ct.aliasTag,
+ actualTag: ct.tag,
+ ns: v.str1,
+ structNs: v.str2,
+ fieldLen: uint8(len(cf.altName)),
+ structfieldLen: uint8(len(cf.name)),
+ value: current.Interface(),
+ param: ct.param,
+ kind: kind,
+ typ: typ,
+ },
+ )
+
+ return
+ }
+ ct = ct.next
+ }
+ }
+
+}
diff --git a/vendor/github.com/go-playground/validator/v10/validator_instance.go b/vendor/github.com/go-playground/validator/v10/validator_instance.go
new file mode 100644
index 0000000..fe6a487
--- /dev/null
+++ b/vendor/github.com/go-playground/validator/v10/validator_instance.go
@@ -0,0 +1,619 @@
+package validator
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "time"
+
+ ut "github.com/go-playground/universal-translator"
+)
+
+const (
+ defaultTagName = "validate"
+ utf8HexComma = "0x2C"
+ utf8Pipe = "0x7C"
+ tagSeparator = ","
+ orSeparator = "|"
+ tagKeySeparator = "="
+ structOnlyTag = "structonly"
+ noStructLevelTag = "nostructlevel"
+ omitempty = "omitempty"
+ isdefault = "isdefault"
+ requiredWithoutAllTag = "required_without_all"
+ requiredWithoutTag = "required_without"
+ requiredWithTag = "required_with"
+ requiredWithAllTag = "required_with_all"
+ requiredIfTag = "required_if"
+ requiredUnlessTag = "required_unless"
+ skipValidationTag = "-"
+ diveTag = "dive"
+ keysTag = "keys"
+ endKeysTag = "endkeys"
+ requiredTag = "required"
+ namespaceSeparator = "."
+ leftBracket = "["
+ rightBracket = "]"
+ restrictedTagChars = ".[],|=+()`~!@#$%^&*\\\"/?<>{}"
+ restrictedAliasErr = "Alias '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation"
+ restrictedTagErr = "Tag '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation"
+)
+
+var (
+ timeDurationType = reflect.TypeOf(time.Duration(0))
+ timeType = reflect.TypeOf(time.Time{})
+
+ defaultCField = &cField{namesEqual: true}
+)
+
+// FilterFunc is the type used to filter fields using
+// StructFiltered(...) function.
+// returning true results in the field being filtered/skipped from
+// validation
+type FilterFunc func(ns []byte) bool
+
+// CustomTypeFunc allows for overriding or adding custom field type handler functions
+// field = field value of the type to return a value to be validated
+// example: Valuer from the sql driver, see https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29
+type CustomTypeFunc func(field reflect.Value) interface{}
+
+// TagNameFunc allows for adding of a custom tag name parser
+type TagNameFunc func(field reflect.StructField) string
+
+type internalValidationFuncWrapper struct {
+ fn FuncCtx
+ runValidatinOnNil bool
+}
+
+// Validate contains the validator settings and cache
+type Validate struct {
+ tagName string
+ pool *sync.Pool
+ hasCustomFuncs bool
+ hasTagNameFunc bool
+ tagNameFunc TagNameFunc
+ structLevelFuncs map[reflect.Type]StructLevelFuncCtx
+ customFuncs map[reflect.Type]CustomTypeFunc
+ aliases map[string]string
+ validations map[string]internalValidationFuncWrapper
+ transTagFunc map[ut.Translator]map[string]TranslationFunc // map[]map[]TranslationFunc
+ tagCache *tagCache
+ structCache *structCache
+}
+
+// New returns a new instance of 'validate' with sane defaults.
+func New() *Validate {
+
+ tc := new(tagCache)
+ tc.m.Store(make(map[string]*cTag))
+
+ sc := new(structCache)
+ sc.m.Store(make(map[reflect.Type]*cStruct))
+
+ v := &Validate{
+ tagName: defaultTagName,
+ aliases: make(map[string]string, len(bakedInAliases)),
+ validations: make(map[string]internalValidationFuncWrapper, len(bakedInValidators)),
+ tagCache: tc,
+ structCache: sc,
+ }
+
+ // must copy alias validators for separate validations to be used in each validator instance
+ for k, val := range bakedInAliases {
+ v.RegisterAlias(k, val)
+ }
+
+ // must copy validators for separate validations to be used in each instance
+ for k, val := range bakedInValidators {
+
+ switch k {
+ // these require that even if the value is nil that the validation should run, omitempty still overrides this behaviour
+ case requiredIfTag, requiredUnlessTag, requiredWithTag, requiredWithAllTag, requiredWithoutTag, requiredWithoutAllTag:
+ _ = v.registerValidation(k, wrapFunc(val), true, true)
+ default:
+ // no need to error check here, baked in will always be valid
+ _ = v.registerValidation(k, wrapFunc(val), true, false)
+ }
+ }
+
+ v.pool = &sync.Pool{
+ New: func() interface{} {
+ return &validate{
+ v: v,
+ ns: make([]byte, 0, 64),
+ actualNs: make([]byte, 0, 64),
+ misc: make([]byte, 32),
+ }
+ },
+ }
+
+ return v
+}
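+
+// A minimal usage sketch (illustrative):
+//
+//  validate := validator.New()
+//
+//  type Login struct {
+//   Email string `validate:"required,email"`
+//  }
+//
+//  err := validate.Struct(Login{}) // returns ValidationErrors for the failed `required` tag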
+
+// SetTagName allows for changing of the default tag name of 'validate'
+func (v *Validate) SetTagName(name string) {
+ v.tagName = name
+}
+
+// RegisterTagNameFunc registers a function to get alternate names for StructFields.
+//
+// eg. to use the names which have been specified for JSON representations of structs, rather than normal Go field names:
+//
+// validate.RegisterTagNameFunc(func(fld reflect.StructField) string {
+// name := strings.SplitN(fld.Tag.Get("json"), ",", 2)[0]
+// if name == "-" {
+// return ""
+// }
+// return name
+// })
+func (v *Validate) RegisterTagNameFunc(fn TagNameFunc) {
+ v.tagNameFunc = fn
+ v.hasTagNameFunc = true
+}
+
+// RegisterValidation adds a validation with the given tag
+//
+// NOTES:
+// - if the key already exists, the previous validation function will be replaced.
+// - this method is not thread-safe; it is intended that these all be registered prior to any validation
+func (v *Validate) RegisterValidation(tag string, fn Func, callValidationEvenIfNull ...bool) error {
+ return v.RegisterValidationCtx(tag, wrapFunc(fn), callValidationEvenIfNull...)
+}
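+
+// For example (illustrative), registering a hypothetical `is-awesome` tag:
+//
+//  _ = validate.RegisterValidation("is-awesome", func(fl validator.FieldLevel) bool {
+//   return fl.Field().String() == "awesome"
+//  })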
+
+// RegisterValidationCtx does the same as RegisterValidation but accepts a FuncCtx validation
+// allowing context.Context validation support.
+func (v *Validate) RegisterValidationCtx(tag string, fn FuncCtx, callValidationEvenIfNull ...bool) error {
+ var nilCheckable bool
+ if len(callValidationEvenIfNull) > 0 {
+ nilCheckable = callValidationEvenIfNull[0]
+ }
+ return v.registerValidation(tag, fn, false, nilCheckable)
+}
+
+func (v *Validate) registerValidation(tag string, fn FuncCtx, bakedIn bool, nilCheckable bool) error {
+ if len(tag) == 0 {
+ return errors.New("Function Key cannot be empty")
+ }
+
+ if fn == nil {
+ return errors.New("Function cannot be empty")
+ }
+
+ _, ok := restrictedTags[tag]
+ if !bakedIn && (ok || strings.ContainsAny(tag, restrictedTagChars)) {
+ panic(fmt.Sprintf(restrictedTagErr, tag))
+ }
+ v.validations[tag] = internalValidationFuncWrapper{fn: fn, runValidatinOnNil: nilCheckable}
+ return nil
+}
+
+// RegisterAlias registers a mapping of a single validation tag that
+// defines a common or complex set of validation(s) to simplify adding validation
+// to structs.
+//
+// NOTE: this function is not thread-safe; it is intended that these all be registered prior to any validation
+func (v *Validate) RegisterAlias(alias, tags string) {
+
+ _, ok := restrictedTags[alias]
+
+ if ok || strings.ContainsAny(alias, restrictedTagChars) {
+ panic(fmt.Sprintf(restrictedAliasErr, alias))
+ }
+
+ v.aliases[alias] = tags
+}
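+
+// For example, the baked in `iscolor` alias is registered as (illustrative):
+//
+//  validate.RegisterAlias("iscolor", "hexcolor|rgb|rgba|hsl|hsla")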
+
+// RegisterStructValidation registers a StructLevelFunc against a number of types.
+//
+// NOTE:
+// - this method is not thread-safe; it is intended that these all be registered prior to any validation
+func (v *Validate) RegisterStructValidation(fn StructLevelFunc, types ...interface{}) {
+ v.RegisterStructValidationCtx(wrapStructLevelFunc(fn), types...)
+}
+
+// RegisterStructValidationCtx registers a StructLevelFuncCtx against a number of types and allows passing
+// of contextual validation information via context.Context.
+//
+// NOTE:
+// - this method is not thread-safe; it is intended that these all be registered prior to any validation
+func (v *Validate) RegisterStructValidationCtx(fn StructLevelFuncCtx, types ...interface{}) {
+
+ if v.structLevelFuncs == nil {
+ v.structLevelFuncs = make(map[reflect.Type]StructLevelFuncCtx)
+ }
+
+ for _, t := range types {
+ tv := reflect.ValueOf(t)
+ if tv.Kind() == reflect.Ptr {
+ t = reflect.Indirect(tv).Interface()
+ }
+
+ v.structLevelFuncs[reflect.TypeOf(t)] = fn
+ }
+}
+
+// RegisterCustomTypeFunc registers a CustomTypeFunc against a number of types
+//
+// NOTE: this method is not thread-safe; it is intended that these all be registered prior to any validation
+func (v *Validate) RegisterCustomTypeFunc(fn CustomTypeFunc, types ...interface{}) {
+
+ if v.customFuncs == nil {
+ v.customFuncs = make(map[reflect.Type]CustomTypeFunc)
+ }
+
+ for _, t := range types {
+ v.customFuncs[reflect.TypeOf(t)] = fn
+ }
+
+ v.hasCustomFuncs = true
+}
+
+// RegisterTranslation registers translations against the provided tag.
+func (v *Validate) RegisterTranslation(tag string, trans ut.Translator, registerFn RegisterTranslationsFunc, translationFn TranslationFunc) (err error) {
+
+ if v.transTagFunc == nil {
+ v.transTagFunc = make(map[ut.Translator]map[string]TranslationFunc)
+ }
+
+ if err = registerFn(trans); err != nil {
+ return
+ }
+
+ m, ok := v.transTagFunc[trans]
+ if !ok {
+ m = make(map[string]TranslationFunc)
+ v.transTagFunc[trans] = m
+ }
+
+ m[tag] = translationFn
+
+ return
+}
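+
+// Illustrative sketch, assuming a ut.Translator `trans` created via the
+// universal-translator package:
+//
+//  _ = validate.RegisterTranslation("required", trans,
+//   func(ut ut.Translator) error {
+//    return ut.Add("required", "{0} is a required field", true)
+//   },
+//   func(ut ut.Translator, fe validator.FieldError) string {
+//    t, _ := ut.T("required", fe.Field())
+//    return t
+//   })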
+
+// Struct validates a struct's exposed fields, and automatically validates nested structs, unless otherwise specified.
+//
+// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
+// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
+func (v *Validate) Struct(s interface{}) error {
+ return v.StructCtx(context.Background(), s)
+}
+
+// StructCtx validates a struct's exposed fields, and automatically validates nested structs, unless otherwise specified
+// and also allows passing of context.Context for contextual validation information.
+//
+// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise.
+// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
+func (v *Validate) StructCtx(ctx context.Context, s interface{}) (err error) {
+
+ val := reflect.ValueOf(s)
+ top := val
+
+ if val.Kind() == reflect.Ptr && !val.IsNil() {
+ val = val.Elem()
+ }
+
+ if val.Kind() != reflect.Struct || val.Type() == timeType {
+ return &InvalidValidationError{Type: reflect.TypeOf(s)}
+ }
+
+ // good to validate
+ vd := v.pool.Get().(*validate)
+ vd.top = top
+ vd.isPartial = false
+ // vd.hasExcludes = false // only need to reset in StructPartial and StructExcept
+
+ vd.validateStruct(ctx, top, val, val.Type(), vd.ns[0:0], vd.actualNs[0:0], nil)
+
+ if len(vd.errs) > 0 {
+ err = vd.errs
+ vd.errs = nil
+ }
+
+ v.pool.Put(vd)
+
+ return
+}
+
+// StructFiltered validates a struct's exposed fields that pass the FilterFunc check, and automatically validates
+// nested structs, unless otherwise specified.
+//
+// It returns InvalidValidationError for bad values passed in, and nil or ValidationErrors as the error otherwise.
+// You will need to assert the error if it's not nil, e.g. err.(validator.ValidationErrors), to access the array of errors.
+func (v *Validate) StructFiltered(s interface{}, fn FilterFunc) error {
+ return v.StructFilteredCtx(context.Background(), s, fn)
+}
+
+// StructFilteredCtx validates a struct's exposed fields that pass the FilterFunc check, and automatically validates
+// nested structs, unless otherwise specified, and also allows passing of contextual validation information via
+// context.Context.
+//
+// It returns InvalidValidationError for bad values passed in, and nil or ValidationErrors as the error otherwise.
+// You will need to assert the error if it's not nil, e.g. err.(validator.ValidationErrors), to access the array of errors.
+func (v *Validate) StructFilteredCtx(ctx context.Context, s interface{}, fn FilterFunc) (err error) {
+ val := reflect.ValueOf(s)
+ top := val
+
+ if val.Kind() == reflect.Ptr && !val.IsNil() {
+ val = val.Elem()
+ }
+
+ if val.Kind() != reflect.Struct || val.Type() == timeType {
+ return &InvalidValidationError{Type: reflect.TypeOf(s)}
+ }
+
+ // good to validate
+ vd := v.pool.Get().(*validate)
+ vd.top = top
+ vd.isPartial = true
+ vd.ffn = fn
+ // vd.hasExcludes = false // only need to reset in StructPartial and StructExcept
+
+ vd.validateStruct(ctx, top, val, val.Type(), vd.ns[0:0], vd.actualNs[0:0], nil)
+
+ if len(vd.errs) > 0 {
+ err = vd.errs
+ vd.errs = nil
+ }
+
+ v.pool.Put(vd)
+
+ return
+}
+
+// StructPartial validates only the fields passed in, ignoring all others.
+// Fields may be provided in a namespaced fashion relative to the struct provided,
+// e.g. NestedStruct.Field or NestedArrayField[0].Struct.Name
+//
+// It returns InvalidValidationError for bad values passed in, and nil or ValidationErrors as the error otherwise.
+// You will need to assert the error if it's not nil, e.g. err.(validator.ValidationErrors), to access the array of errors.
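+//
+// e.g. (hypothetical field names):
+//
+//	err := validate.StructPartial(user, "FirstName", "Address.City")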
+func (v *Validate) StructPartial(s interface{}, fields ...string) error {
+ return v.StructPartialCtx(context.Background(), s, fields...)
+}
+
+// StructPartialCtx validates only the fields passed in, ignoring all others, and allows passing of contextual
+// validation information via context.Context.
+// Fields may be provided in a namespaced fashion relative to the struct provided,
+// e.g. NestedStruct.Field or NestedArrayField[0].Struct.Name
+//
+// It returns InvalidValidationError for bad values passed in, and nil or ValidationErrors as the error otherwise.
+// You will need to assert the error if it's not nil, e.g. err.(validator.ValidationErrors), to access the array of errors.
+func (v *Validate) StructPartialCtx(ctx context.Context, s interface{}, fields ...string) (err error) {
+ val := reflect.ValueOf(s)
+ top := val
+
+ if val.Kind() == reflect.Ptr && !val.IsNil() {
+ val = val.Elem()
+ }
+
+ if val.Kind() != reflect.Struct || val.Type() == timeType {
+ return &InvalidValidationError{Type: reflect.TypeOf(s)}
+ }
+
+ // good to validate
+ vd := v.pool.Get().(*validate)
+ vd.top = top
+ vd.isPartial = true
+ vd.ffn = nil
+ vd.hasExcludes = false
+ vd.includeExclude = make(map[string]struct{})
+
+ typ := val.Type()
+ name := typ.Name()
+
+ for _, k := range fields {
+
+ flds := strings.Split(k, namespaceSeparator)
+ if len(flds) > 0 {
+
+ vd.misc = append(vd.misc[0:0], name...)
+ vd.misc = append(vd.misc, '.')
+
+ for _, s := range flds {
+
+ idx := strings.Index(s, leftBracket)
+
+ if idx != -1 {
+ for idx != -1 {
+ vd.misc = append(vd.misc, s[:idx]...)
+ vd.includeExclude[string(vd.misc)] = struct{}{}
+
+ idx2 := strings.Index(s, rightBracket)
+ idx2++
+ vd.misc = append(vd.misc, s[idx:idx2]...)
+ vd.includeExclude[string(vd.misc)] = struct{}{}
+ s = s[idx2:]
+ idx = strings.Index(s, leftBracket)
+ }
+ } else {
+
+ vd.misc = append(vd.misc, s...)
+ vd.includeExclude[string(vd.misc)] = struct{}{}
+ }
+
+ vd.misc = append(vd.misc, '.')
+ }
+ }
+ }
+
+ vd.validateStruct(ctx, top, val, typ, vd.ns[0:0], vd.actualNs[0:0], nil)
+
+ if len(vd.errs) > 0 {
+ err = vd.errs
+ vd.errs = nil
+ }
+
+ v.pool.Put(vd)
+
+ return
+}
+
+// StructExcept validates all fields except the ones passed in.
+// Fields may be provided in a namespaced fashion relative to the struct provided
+// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name
+//
+// It returns InvalidValidationError for bad values passed in, and nil or ValidationErrors as the error otherwise.
+// You will need to assert the error if it's not nil, e.g. err.(validator.ValidationErrors), to access the array of errors.
+func (v *Validate) StructExcept(s interface{}, fields ...string) error {
+ return v.StructExceptCtx(context.Background(), s, fields...)
+}
+
+// StructExceptCtx validates all fields except the ones passed in, and allows passing of contextual
+// validation information via context.Context.
+// Fields may be provided in a namespaced fashion relative to the struct provided,
+// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name
+//
+// It returns InvalidValidationError for bad values passed in, and nil or ValidationErrors as the error otherwise.
+// You will need to assert the error if it's not nil, e.g. err.(validator.ValidationErrors), to access the array of errors.
+func (v *Validate) StructExceptCtx(ctx context.Context, s interface{}, fields ...string) (err error) {
+ val := reflect.ValueOf(s)
+ top := val
+
+ if val.Kind() == reflect.Ptr && !val.IsNil() {
+ val = val.Elem()
+ }
+
+ if val.Kind() != reflect.Struct || val.Type() == timeType {
+ return &InvalidValidationError{Type: reflect.TypeOf(s)}
+ }
+
+ // good to validate
+ vd := v.pool.Get().(*validate)
+ vd.top = top
+ vd.isPartial = true
+ vd.ffn = nil
+ vd.hasExcludes = true
+ vd.includeExclude = make(map[string]struct{})
+
+ typ := val.Type()
+ name := typ.Name()
+
+ for _, key := range fields {
+
+ vd.misc = vd.misc[0:0]
+
+ if len(name) > 0 {
+ vd.misc = append(vd.misc, name...)
+ vd.misc = append(vd.misc, '.')
+ }
+
+ vd.misc = append(vd.misc, key...)
+ vd.includeExclude[string(vd.misc)] = struct{}{}
+ }
+
+ vd.validateStruct(ctx, top, val, typ, vd.ns[0:0], vd.actualNs[0:0], nil)
+
+ if len(vd.errs) > 0 {
+ err = vd.errs
+ vd.errs = nil
+ }
+
+ v.pool.Put(vd)
+
+ return
+}
+
+// Var validates a single variable using tag style validation.
+// e.g.
+//	var i int
+//	validate.Var(i, "gt=1,lt=10")
+//
+// WARNING: a struct can be passed for validation, e.g. time.Time is a struct, or
+// you may have a custom type with a registered custom type handler, so structs must
+// be allowed; however, unforeseen validations will occur if you try to validate a
+// struct that is meant to be passed to 'validate.Struct'.
+//
+// It returns InvalidValidationError for bad values passed in, and nil or ValidationErrors as the error otherwise.
+// You will need to assert the error if it's not nil, e.g. err.(validator.ValidationErrors), to access the array of errors.
+// It validates Array, Slice and map fields, which may contain more than one error.
+func (v *Validate) Var(field interface{}, tag string) error {
+ return v.VarCtx(context.Background(), field, tag)
+}
+
+// VarCtx validates a single variable using tag style validation and allows passing of contextual
+// validation information via context.Context.
+// e.g.
+//	var i int
+//	validate.VarCtx(context.Background(), i, "gt=1,lt=10")
+//
+// WARNING: a struct can be passed for validation, e.g. time.Time is a struct, or
+// you may have a custom type with a registered custom type handler, so structs must
+// be allowed; however, unforeseen validations will occur if you try to validate a
+// struct that is meant to be passed to 'validate.Struct'.
+//
+// It returns InvalidValidationError for bad values passed in, and nil or ValidationErrors as the error otherwise.
+// You will need to assert the error if it's not nil, e.g. err.(validator.ValidationErrors), to access the array of errors.
+// It validates Array, Slice and map fields, which may contain more than one error.
+func (v *Validate) VarCtx(ctx context.Context, field interface{}, tag string) (err error) {
+ if len(tag) == 0 || tag == skipValidationTag {
+ return nil
+ }
+
+ ctag := v.fetchCacheTag(tag)
+ val := reflect.ValueOf(field)
+ vd := v.pool.Get().(*validate)
+ vd.top = val
+ vd.isPartial = false
+ vd.traverseField(ctx, val, val, vd.ns[0:0], vd.actualNs[0:0], defaultCField, ctag)
+
+ if len(vd.errs) > 0 {
+ err = vd.errs
+ vd.errs = nil
+ }
+ v.pool.Put(vd)
+ return
+}
+
+// VarWithValue validates a single variable against another variable/field's value using tag style validation.
+// e.g.
+//	s1 := "abcd"
+//	s2 := "abcd"
+//	validate.VarWithValue(s1, s2, "eqcsfield") // returns nil
+//
+// WARNING: a struct can be passed for validation, e.g. time.Time is a struct, or
+// you may have a custom type with a registered custom type handler, so structs must
+// be allowed; however, unforeseen validations will occur if you try to validate a
+// struct that is meant to be passed to 'validate.Struct'.
+//
+// It returns InvalidValidationError for bad values passed in, and nil or ValidationErrors as the error otherwise.
+// You will need to assert the error if it's not nil, e.g. err.(validator.ValidationErrors), to access the array of errors.
+// It validates Array, Slice and map fields, which may contain more than one error.
+func (v *Validate) VarWithValue(field interface{}, other interface{}, tag string) error {
+ return v.VarWithValueCtx(context.Background(), field, other, tag)
+}
+
+// VarWithValueCtx validates a single variable against another variable/field's value using tag style validation and
+// allows passing of contextual validation information via context.Context.
+// e.g.
+//	s1 := "abcd"
+//	s2 := "abcd"
+//	validate.VarWithValueCtx(context.Background(), s1, s2, "eqcsfield") // returns nil
+//
+// WARNING: a struct can be passed for validation, e.g. time.Time is a struct, or
+// you may have a custom type with a registered custom type handler, so structs must
+// be allowed; however, unforeseen validations will occur if you try to validate a
+// struct that is meant to be passed to 'validate.Struct'.
+//
+// It returns InvalidValidationError for bad values passed in, and nil or ValidationErrors as the error otherwise.
+// You will need to assert the error if it's not nil, e.g. err.(validator.ValidationErrors), to access the array of errors.
+// It validates Array, Slice and map fields, which may contain more than one error.
+func (v *Validate) VarWithValueCtx(ctx context.Context, field interface{}, other interface{}, tag string) (err error) {
+ if len(tag) == 0 || tag == skipValidationTag {
+ return nil
+ }
+ ctag := v.fetchCacheTag(tag)
+ otherVal := reflect.ValueOf(other)
+ vd := v.pool.Get().(*validate)
+ vd.top = otherVal
+ vd.isPartial = false
+ vd.traverseField(ctx, otherVal, reflect.ValueOf(field), vd.ns[0:0], vd.actualNs[0:0], defaultCField, ctag)
+
+ if len(vd.errs) > 0 {
+ err = vd.errs
+ vd.errs = nil
+ }
+ v.pool.Put(vd)
+ return
+}
diff --git a/vendor/github.com/go-redis/redis/v8/.gitignore b/vendor/github.com/go-redis/redis/v8/.gitignore
new file mode 100644
index 0000000..b975a7b
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/.gitignore
@@ -0,0 +1,3 @@
+*.rdb
+testdata/*/
+.idea/
diff --git a/vendor/github.com/go-redis/redis/v8/.golangci.yml b/vendor/github.com/go-redis/redis/v8/.golangci.yml
new file mode 100644
index 0000000..d15b5ac
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/.golangci.yml
@@ -0,0 +1,26 @@
+run:
+ concurrency: 8
+ deadline: 5m
+ tests: false
+linters:
+ enable-all: true
+ disable:
+ - funlen
+ - gochecknoglobals
+ - gochecknoinits
+ - gocognit
+ - goconst
+ - godox
+ - gosec
+ - maligned
+ - wsl
+ - gomnd
+ - goerr113
+ - exhaustive
+ - nestif
+ - nlreturn
+ - exhaustivestruct
+ - wrapcheck
+ - errorlint
+ - cyclop
+ - forcetypeassert
diff --git a/vendor/github.com/go-redis/redis/v8/.prettierrc b/vendor/github.com/go-redis/redis/v8/.prettierrc
new file mode 100644
index 0000000..8b7f044
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/.prettierrc
@@ -0,0 +1,4 @@
+semi: false
+singleQuote: true
+proseWrap: always
+printWidth: 100
diff --git a/vendor/github.com/go-redis/redis/v8/CHANGELOG.md b/vendor/github.com/go-redis/redis/v8/CHANGELOG.md
new file mode 100644
index 0000000..63aabd3
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/CHANGELOG.md
@@ -0,0 +1,143 @@
+# Changelog
+
+> :heart:
+> [**Uptrace.dev** - All-in-one tool to optimize performance and monitor errors & logs](https://uptrace.dev)
+
+## v8.11
+
+- Remove OpenTelemetry metrics.
+- Supports more redis commands and options.
+
+## v8.10
+
+- Removed extra OpenTelemetry spans from go-redis core. Now go-redis instrumentation only adds a
+ single span with a Redis command (instead of 4 spans). There are multiple reasons behind this
+ decision:
+
+ - Traces become smaller and less noisy.
+ - It may be costly to process those 3 extra spans for each query.
+ - go-redis no longer depends on OpenTelemetry.
+
+ Eventually we hope to replace the information that we no longer collect with OpenTelemetry
+ Metrics.
+
+## v8.9
+
+- Changed `PubSub.Channel` to only rely on `Ping` result. You can now use `WithChannelSize`,
+ `WithChannelHealthCheckInterval`, and `WithChannelSendTimeout` to override default settings.
+
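+A hedged usage sketch of these channel options (the `rdb` client and `ctx` are assumed to exist):
+
+```go
+pubsub := rdb.Subscribe(ctx, "mychannel")
+ch := pubsub.Channel(
+    redis.WithChannelSize(1000),
+    redis.WithChannelHealthCheckInterval(30*time.Second),
+    redis.WithChannelSendTimeout(5*time.Second),
+)
+for msg := range ch {
+    fmt.Println(msg.Channel, msg.Payload)
+}
+```
+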
+## v8.8
+
+- To make updating easier, extra modules now have the same version as go-redis does. That means that
+ you need to update your imports:
+
+```
+github.com/go-redis/redis/extra/redisotel -> github.com/go-redis/redis/extra/redisotel/v8
+github.com/go-redis/redis/extra/rediscensus -> github.com/go-redis/redis/extra/rediscensus/v8
+```
+
+## v8.5
+
+- [knadh](https://github.com/knadh) contributed the long-awaited ability to scan a Redis Hash into a
+  struct:
+
+```go
+err := rdb.HGetAll(ctx, "hash").Scan(&data)
+
+err := rdb.MGet(ctx, "key1", "key2").Scan(&data)
+```
+
+- Please check [redismock](https://github.com/go-redis/redismock) by
+ [monkey92t](https://github.com/monkey92t) if you are looking for mocking Redis Client.
+
+## v8
+
+- All commands require `context.Context` as a first argument, e.g. `rdb.Ping(ctx)`. If you are not
+ using `context.Context` yet, the simplest option is to define global package variable
+ `var ctx = context.TODO()` and use it when `ctx` is required.
+
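+A minimal sketch of that approach (assuming a Redis server on localhost):
+
+```go
+var ctx = context.TODO()
+
+rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
+if err := rdb.Ping(ctx).Err(); err != nil {
+    panic(err)
+}
+```
+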
+- Full support for `context.Context` canceling.
+
+- Added `redis.NewFailoverClusterClient` that supports routing read-only commands to a slave node.
+
+- Added `redisext.OpenTemetryHook` that adds
+ [Redis OpenTelemetry instrumentation](https://redis.uptrace.dev/tracing/).
+
+- Redis slow log support.
+
+- Ring uses Rendezvous Hashing by default which provides better distribution. You need to move
+ existing keys to a new location or keys will be inaccessible / lost. To use old hashing scheme:
+
+```go
+import (
+    "hash/crc32"
+
+    "github.com/golang/groupcache/consistenthash"
+)
+
+// Note: the signature below matches go-redis v8's RingOptions.NewConsistentHash;
+// consistenthash.Map satisfies the Get(string) string interface.
+ring := redis.NewRing(&redis.RingOptions{
+    NewConsistentHash: func(shards []string) redis.ConsistentHash {
+        hash := consistenthash.New(100, crc32.ChecksumIEEE)
+        hash.Add(shards...)
+        return hash
+    },
+})
+```
+
+- `ClusterOptions.MaxRedirects` default value is changed from 8 to 3.
+- `Options.MaxRetries` default value is changed from 0 to 3.
+
+- `Cluster.ForEachNode` is renamed to `ForEachShard` for consistency with `Ring`.
+
+## v7.3
+
+- New option `Options.Username`, which causes the client to use `AuthACL`. Be aware of this if your
+  connection URL contains a username.
+
+## v7.2
+
+- Existing `HMSet` is renamed to `HSet` and old deprecated `HMSet` is restored for Redis 3 users.
+
+## v7.1
+
+- Existing `Cmd.String` is renamed to `Cmd.Text`. New `Cmd.String` implements `fmt.Stringer`
+ interface.
+
+## v7
+
+- _Important_. Tx.Pipeline now returns a non-transactional pipeline. Use Tx.TxPipeline for a
+ transactional pipeline.
+- WrapProcess is replaced with more convenient AddHook that has access to context.Context.
+- WithContext now can not be used to create a shallow copy of the client.
+- New methods ProcessContext, DoContext, and ExecContext.
+- Client respects Context.Deadline when setting net.Conn deadline.
+- Client listens on Context.Done while waiting for a connection from the pool and returns an error
+  when the context is canceled.
+- Add PubSub.ChannelWithSubscriptions that sends `*Subscription` in addition to `*Message` to allow
+ detecting reconnections.
+- `time.Time` is now marshalled in RFC3339 format. `rdb.Get("foo").Time()` helper is added to parse
+ the time.
+- `SetLimiter` is removed and added `Options.Limiter` instead.
+- `HMSet` is deprecated as of Redis v4.
+
+## v6.15
+
+- Cluster and Ring pipelines process commands for each node in its own goroutine.
+
+## 6.14
+
+- Added Options.MinIdleConns.
+- Added Options.MaxConnAge.
+- PoolStats.FreeConns is renamed to PoolStats.IdleConns.
+- Add Client.Do to simplify creating custom commands.
+- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers.
+- Lower memory usage.
+
+## v6.13
+
+- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set
+ `HashReplicas = 1000` for better keys distribution between shards.
+- Cluster client was optimized to use much less memory when reloading cluster state.
+- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when a
+  timeout occurs. In most cases it is recommended to use PubSub.Channel instead.
+- Dialer.KeepAlive is set to 5 minutes by default.
+
+## v6.12
+
+- ClusterClient got a new option called `ClusterSlots`, which allows building a cluster of normal
+  Redis servers that don't have cluster mode enabled. See
+ https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup
diff --git a/vendor/github.com/go-redis/redis/v8/LICENSE b/vendor/github.com/go-redis/redis/v8/LICENSE
new file mode 100644
index 0000000..298bed9
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2013 The github.com/go-redis/redis Authors.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/go-redis/redis/v8/Makefile b/vendor/github.com/go-redis/redis/v8/Makefile
new file mode 100644
index 0000000..b16709c
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/Makefile
@@ -0,0 +1,26 @@
+test: testdeps
+ go test ./...
+ go test ./... -short -race
+ go test ./... -run=NONE -bench=. -benchmem
+ env GOOS=linux GOARCH=386 go test ./...
+ go vet
+
+testdeps: testdata/redis/src/redis-server
+
+bench: testdeps
+ go test ./... -test.run=NONE -test.bench=. -test.benchmem
+
+.PHONY: all test testdeps bench
+
+testdata/redis:
+ mkdir -p $@
+ wget -qO- https://download.redis.io/releases/redis-6.2.5.tar.gz | tar xvz --strip-components=1 -C $@
+
+testdata/redis/src/redis-server: testdata/redis
+ cd $< && make all
+
+tag:
+ git tag $(VERSION)
+ git tag extra/rediscmd/$(VERSION)
+ git tag extra/redisotel/$(VERSION)
+ git tag extra/rediscensus/$(VERSION)
diff --git a/vendor/github.com/go-redis/redis/v8/README.md b/vendor/github.com/go-redis/redis/v8/README.md
new file mode 100644
index 0000000..2c36f78
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/README.md
@@ -0,0 +1,176 @@
+## News
+
+ * v2.7.0 [released](https://github.com/go-resty/resty/releases/tag/v2.7.0) and tagged on Nov 03, 2021.
+ * v2.0.0 [released](https://github.com/go-resty/resty/releases/tag/v2.0.0) and tagged on Jul 16, 2019.
+ * v1.12.0 [released](https://github.com/go-resty/resty/releases/tag/v1.12.0) and tagged on Feb 27, 2019.
+ * v1.0 released and tagged on Sep 25, 2017. Resty's first version was released on Sep 15, 2015; it then grew gradually into a very handy and helpful library. It's been two years since the first release. I'm very thankful to Resty users and its [contributors](https://github.com/go-resty/resty/graphs/contributors).
+
+## Features
+
+ * GET, POST, PUT, DELETE, HEAD, PATCH, OPTIONS, etc.
+ * Simple and chainable methods for settings and requests
+ * [Request](https://pkg.go.dev/github.com/go-resty/resty/v2#Request) Body can be `string`, `[]byte`, `struct`, `map`, `slice` and `io.Reader` too
+ * Auto detects `Content-Type`
+ * Buffer-less processing for `io.Reader`
+ * Native `*http.Request` instance may be accessed during middleware and request execution via `Request.RawRequest`
+ * Request Body can be read multiple times via `Request.RawRequest.GetBody()`
+ * [Response](https://pkg.go.dev/github.com/go-resty/resty/v2#Response) object gives you more possibilities
+ * Access as `[]byte` array - `response.Body()` OR Access as `string` - `response.String()`
+ * Know your `response.Time()` and when the response was received via `response.ReceivedAt()`
+ * Automatic marshal and unmarshal for `JSON` and `XML` content type
+ * Default is `JSON` if you supply a `struct/map` without a `Content-Type` header
+ * For auto-unmarshal, refer to -
+ - Success scenario [Request.SetResult()](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.SetResult) and [Response.Result()](https://pkg.go.dev/github.com/go-resty/resty/v2#Response.Result).
+ - Error scenario [Request.SetError()](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.SetError) and [Response.Error()](https://pkg.go.dev/github.com/go-resty/resty/v2#Response.Error).
+ - Supports [RFC7807](https://tools.ietf.org/html/rfc7807) - `application/problem+json` & `application/problem+xml`
+ * Resty provides an option to override [JSON Marshal/Unmarshal and XML Marshal/Unmarshal](#override-json--xml-marshalunmarshal)
+ * Easy to upload one or more file(s) via `multipart/form-data`
+ * Auto detects file content type
+ * Request URL [Path Params (aka URI Params)](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.SetPathParams)
+ * Backoff Retry Mechanism with retry condition function [reference](retry_test.go)
+ * Resty client HTTP & REST [Request](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.OnBeforeRequest) and [Response](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.OnAfterResponse) middlewares
+ * `Request.SetContext` supported
+ * Authorization option of `BasicAuth` and `Bearer` token
+ * Set request `ContentLength` value for all requests or a particular request
+ * Custom [Root Certificates](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.SetRootCertificate) and Client [Certificates](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.SetCertificates)
+ * Download/Save HTTP response directly into File, like `curl -o` flag. See [SetOutputDirectory](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.SetOutputDirectory) & [SetOutput](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.SetOutput).
+ * Cookies for your request and CookieJar support
+ * SRV Record based request instead of Host URL
+ * Client settings like `Timeout`, `RedirectPolicy`, `Proxy`, `TLSClientConfig`, `Transport`, etc.
+ * Optionally allows GET request with payload, see [SetAllowGetMethodPayload](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.SetAllowGetMethodPayload)
+ * Supports registering external JSON library into resty, see [how to use](https://github.com/go-resty/resty/issues/76#issuecomment-314015250)
+ * Exposes Response reader without reading response (no auto-unmarshaling) if need be, see [how to use](https://github.com/go-resty/resty/issues/87#issuecomment-322100604)
+ * Option to specify expected `Content-Type` when response `Content-Type` header missing. Refer to [#92](https://github.com/go-resty/resty/issues/92)
+ * Resty design
+ * Have client level settings & options and also override at Request level if you want to
+ * Request and Response middleware
+ * Create multiple clients if you want to, via `resty.New()`
+ * Supports `http.RoundTripper` implementation, see [SetTransport](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.SetTransport)
+ * goroutine concurrent safe
+ * Resty Client trace, see [Client.EnableTrace](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.EnableTrace) and [Request.EnableTrace](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.EnableTrace)
+ * Since v2.4.0, trace info contains a `RequestAttempt` value, and the `Request` object contains an `Attempt` attribute
+ * Debug mode - clean and informative logging presentation
+ * Gzip - Go does it automatically, and resty has fallback handling too
+ * Works fine with `HTTP/2` and `HTTP/1.1`
+ * [Bazel support](#bazel-support)
+ * Easily mock Resty for testing, [for e.g.](#mocking-http-requests-using-httpmock-library)
+ * Well tested client library
+
+### Included Batteries
+
+ * Redirect Policies - see [how to use](#redirect-policy)
+ * NoRedirectPolicy
+ * FlexibleRedirectPolicy
+ * DomainCheckRedirectPolicy
+ * etc. [more info](redirect.go)
+ * Retry Mechanism [how to use](#retries)
+ * Backoff Retry
+ * Conditional Retry
+ * Since v2.6.0, Retry Hooks - [Client](https://pkg.go.dev/github.com/go-resty/resty/v2#Client.AddRetryHook), [Request](https://pkg.go.dev/github.com/go-resty/resty/v2#Request.AddRetryHook)
+ * SRV Record based request instead of Host URL [how to use](resty_test.go#L1412)
+ * etc (upcoming - throw your ideas [here](https://github.com/go-resty/resty/issues)).
+
+
+#### Supported Go Versions
+
+Resty first started supporting `go modules` in the `v1.10.0` release.
+
+From Resty v2 onward, it fully embraces [go modules](https://github.com/golang/go/wiki/Modules) package releases. It requires a Go version capable of understanding `/vN` suffixed imports:
+
+- 1.9.7+
+- 1.10.3+
+- 1.11+
+
+
+## It might be beneficial for your project :smile:
+
+The Resty author has also published the following projects for the Go community.
+
+ * [aah framework](https://aahframework.org) - A secure, flexible, rapid Go web framework.
+ * [THUMBAI](https://thumbai.app) - Go Mod Repository, Go Vanity Service and Simple Proxy Server.
+ * [go-model](https://github.com/jeevatkm/go-model) - Robust & Easy to use model mapper and utility methods for Go `struct`.
+
+
+## Installation
+
+```bash
+# Go Modules
+require github.com/go-resty/resty/v2 v2.7.0
+```
+
+## Usage
+
+The following samples will help you become as comfortable as possible with the resty library.
+
+```go
+// Import resty into your code and refer it as `resty`.
+import "github.com/go-resty/resty/v2"
+```
+
+#### Simple GET
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+resp, err := client.R().
+ EnableTrace().
+ Get("https://httpbin.org/get")
+
+// Explore response object
+fmt.Println("Response Info:")
+fmt.Println(" Error :", err)
+fmt.Println(" Status Code:", resp.StatusCode())
+fmt.Println(" Status :", resp.Status())
+fmt.Println(" Proto :", resp.Proto())
+fmt.Println(" Time :", resp.Time())
+fmt.Println(" Received At:", resp.ReceivedAt())
+fmt.Println(" Body :\n", resp)
+fmt.Println()
+
+// Explore trace info
+fmt.Println("Request Trace Info:")
+ti := resp.Request.TraceInfo()
+fmt.Println(" DNSLookup :", ti.DNSLookup)
+fmt.Println(" ConnTime :", ti.ConnTime)
+fmt.Println(" TCPConnTime :", ti.TCPConnTime)
+fmt.Println(" TLSHandshake :", ti.TLSHandshake)
+fmt.Println(" ServerTime :", ti.ServerTime)
+fmt.Println(" ResponseTime :", ti.ResponseTime)
+fmt.Println(" TotalTime :", ti.TotalTime)
+fmt.Println(" IsConnReused :", ti.IsConnReused)
+fmt.Println(" IsConnWasIdle :", ti.IsConnWasIdle)
+fmt.Println(" ConnIdleTime :", ti.ConnIdleTime)
+fmt.Println(" RequestAttempt:", ti.RequestAttempt)
+fmt.Println(" RemoteAddr :", ti.RemoteAddr.String())
+
+/* Output
+Response Info:
+ Error :
+ Status Code: 200
+ Status : 200 OK
+ Proto : HTTP/2.0
+ Time : 457.034718ms
+ Received At: 2020-09-14 15:35:29.784681 -0700 PDT m=+0.458137045
+ Body :
+ {
+ "args": {},
+ "headers": {
+ "Accept-Encoding": "gzip",
+ "Host": "httpbin.org",
+ "User-Agent": "go-resty/2.4.0 (https://github.com/go-resty/resty)",
+ "X-Amzn-Trace-Id": "Root=1-5f5ff031-000ff6292204aa6898e4de49"
+ },
+ "origin": "0.0.0.0",
+ "url": "https://httpbin.org/get"
+ }
+
+Request Trace Info:
+ DNSLookup : 4.074657ms
+ ConnTime : 381.709936ms
+ TCPConnTime : 77.428048ms
+ TLSHandshake : 299.623597ms
+ ServerTime : 75.414703ms
+ ResponseTime : 79.337µs
+ TotalTime : 457.034718ms
+ IsConnReused : false
+ IsConnWasIdle : false
+ ConnIdleTime : 0s
+ RequestAttempt: 1
+ RemoteAddr : 3.221.81.55:443
+*/
+```
+
+#### Enhanced GET
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+resp, err := client.R().
+ SetQueryParams(map[string]string{
+ "page_no": "1",
+ "limit": "20",
+ "sort":"name",
+ "order": "asc",
+ "random":strconv.FormatInt(time.Now().Unix(), 10),
+ }).
+ SetHeader("Accept", "application/json").
+ SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F").
+ Get("/search_result")
+
+
+// Sample of using Request.SetQueryString method
+resp, err := client.R().
+ SetQueryString("productId=232&template=fresh-sample&cat=resty&source=google&kw=buy a lot more").
+ SetHeader("Accept", "application/json").
+ SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F").
+ Get("/show_product")
+
+
+// If necessary, you can force response content type to tell Resty to parse a JSON response into your struct
+resp, err := client.R().
+ SetResult(result).
+ ForceContentType("application/json").
+ Get("v2/alpine/manifests/latest")
+```
+
+#### Various POST method combinations
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// POST JSON string
+// No need to set content type, if you have client level setting
+resp, err := client.R().
+ SetHeader("Content-Type", "application/json").
+ SetBody(`{"username":"testuser", "password":"testpass"}`).
+ SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}).
+ Post("https://myapp.com/login")
+
+// POST []byte array
+// No need to set content type, if you have client level setting
+resp, err := client.R().
+ SetHeader("Content-Type", "application/json").
+ SetBody([]byte(`{"username":"testuser", "password":"testpass"}`)).
+ SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}).
+ Post("https://myapp.com/login")
+
+// POST Struct, default is JSON content type. No need to set one
+resp, err := client.R().
+ SetBody(User{Username: "testuser", Password: "testpass"}).
+ SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}).
+ SetError(&AuthError{}). // or SetError(AuthError{}).
+ Post("https://myapp.com/login")
+
+// POST Map, default is JSON content type. No need to set one
+resp, err := client.R().
+ SetBody(map[string]interface{}{"username": "testuser", "password": "testpass"}).
+ SetResult(&AuthSuccess{}). // or SetResult(AuthSuccess{}).
+ SetError(&AuthError{}). // or SetError(AuthError{}).
+ Post("https://myapp.com/login")
+
+// POST of raw bytes for file upload. For example: upload file to Dropbox
+fileBytes, _ := ioutil.ReadFile("/Users/jeeva/mydocument.pdf")
+
+// Note we are not setting the content-type header, since go-resty automatically detects Content-Type for you
+resp, err := client.R().
+ SetBody(fileBytes).
+ SetContentLength(true). // Dropbox expects this value
+ SetAuthToken("").
+ SetError(&DropboxError{}). // or SetError(DropboxError{}).
+ Post("https://content.dropboxapi.com/1/files_put/auto/resty/mydocument.pdf") // for upload Dropbox supports PUT too
+
+// Note: resty detects Content-Type for request body/payload if content type header is not set.
+// * For struct and map data type defaults to 'application/json'
+// * Fallback is plain text content type
+```
+
+#### Sample PUT
+
+You can use various combinations of the `PUT` method call, as demonstrated for `POST`.
+
+```go
+// Note: This is one sample of PUT method usage; refer to POST for more combinations
+
+// Create a Resty Client
+client := resty.New()
+
+// Request goes as JSON content type
+// No need to set auth token, error, if you have client level settings
+resp, err := client.R().
+ SetBody(Article{
+ Title: "go-resty",
+ Content: "This is my article content, oh ya!",
+ Author: "Jeevanandam M",
+ Tags: []string{"article", "sample", "resty"},
+ }).
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ SetError(&Error{}). // or SetError(Error{}).
+ Put("https://myapp.com/article/1234")
+```
+
+#### Sample PATCH
+
+You can use various combinations of the `PATCH` method call, as demonstrated for `POST`.
+
+```go
+// Note: This is one sample of PATCH method usage; refer to POST for more combinations
+
+// Create a Resty Client
+client := resty.New()
+
+// Request goes as JSON content type
+// No need to set auth token, error, if you have client level settings
+resp, err := client.R().
+ SetBody(Article{
+ Tags: []string{"new tag1", "new tag2"},
+ }).
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ SetError(&Error{}). // or SetError(Error{}).
+ Patch("https://myapp.com/articles/1234")
+```
+
+#### Sample DELETE, HEAD, OPTIONS
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// DELETE an article
+// No need to set auth token, error, if you have client level settings
+resp, err := client.R().
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ SetError(&Error{}). // or SetError(Error{}).
+ Delete("https://myapp.com/articles/1234")
+
+// DELETE articles with a payload/body as a JSON string
+// No need to set auth token, error, if you have client level settings
+resp, err := client.R().
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ SetError(&Error{}). // or SetError(Error{}).
+ SetHeader("Content-Type", "application/json").
+    SetBody(`{"article_ids": [1002, 1006, 1007, 87683, 45432]}`).
+ Delete("https://myapp.com/articles")
+
+// HEAD of resource
+// No need to set auth token, if you have client level settings
+resp, err := client.R().
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ Head("https://myapp.com/videos/hi-res-video")
+
+// OPTIONS of resource
+// No need to set auth token, if you have client level settings
+resp, err := client.R().
+ SetAuthToken("C6A79608-782F-4ED0-A11D-BD82FAD829CD").
+ Options("https://myapp.com/servers/nyc-dc-01")
+```
+
+#### Override JSON & XML Marshal/Unmarshal
+
+You can register your choice of JSON/XML library with resty, or write your own. By default, resty registers the standard `encoding/json` and `encoding/xml` packages respectively.
+```go
+// Example of registering json-iterator
+import jsoniter "github.com/json-iterator/go"
+
+json := jsoniter.ConfigCompatibleWithStandardLibrary
+
+client := resty.New()
+client.JSONMarshal = json.Marshal
+client.JSONUnmarshal = json.Unmarshal
+
+// similarly user could do for XML too with -
+client.XMLMarshal
+client.XMLUnmarshal
+```
+
+### Multipart File(s) upload
+
+#### Using io.Reader
+
+```go
+profileImgBytes, _ := ioutil.ReadFile("/Users/jeeva/test-img.png")
+notesBytes, _ := ioutil.ReadFile("/Users/jeeva/text-file.txt")
+
+// Create a Resty Client
+client := resty.New()
+
+resp, err := client.R().
+ SetFileReader("profile_img", "test-img.png", bytes.NewReader(profileImgBytes)).
+ SetFileReader("notes", "text-file.txt", bytes.NewReader(notesBytes)).
+ SetFormData(map[string]string{
+ "first_name": "Jeevanandam",
+ "last_name": "M",
+ }).
+ Post("http://myapp.com/upload")
+```
+
+#### Using File directly from Path
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Single file scenario
+resp, err := client.R().
+ SetFile("profile_img", "/Users/jeeva/test-img.png").
+ Post("http://myapp.com/upload")
+
+// Multiple files scenario
+resp, err := client.R().
+ SetFiles(map[string]string{
+ "profile_img": "/Users/jeeva/test-img.png",
+ "notes": "/Users/jeeva/text-file.txt",
+ }).
+ Post("http://myapp.com/upload")
+
+// Multipart of form fields and files
+resp, err := client.R().
+ SetFiles(map[string]string{
+ "profile_img": "/Users/jeeva/test-img.png",
+ "notes": "/Users/jeeva/text-file.txt",
+ }).
+ SetFormData(map[string]string{
+ "first_name": "Jeevanandam",
+ "last_name": "M",
+ "zip_code": "00001",
+ "city": "my city",
+ "access_token": "C6A79608-782F-4ED0-A11D-BD82FAD829CD",
+ }).
+ Post("http://myapp.com/profile")
+```
+
+#### Sample Form submission
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// just mentioning POST as an example with a simple flow
+// User Login
+resp, err := client.R().
+ SetFormData(map[string]string{
+ "username": "jeeva",
+ "password": "mypass",
+ }).
+ Post("http://myapp.com/login")
+
+// Followed by profile update
+resp, err := client.R().
+ SetFormData(map[string]string{
+ "first_name": "Jeevanandam",
+ "last_name": "M",
+ "zip_code": "00001",
+ "city": "new city update",
+ }).
+ Post("http://myapp.com/profile")
+
+// Multi value form data
+criteria := url.Values{
+ "search_criteria": []string{"book", "glass", "pencil"},
+}
+resp, err := client.R().
+ SetFormDataFromValues(criteria).
+ Post("http://myapp.com/search")
+```
+
+#### Save HTTP Response into File
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Set the output directory path. If the directory does not exist, resty creates it.
+// This is optional if you're planning to use an absolute path in
+// `Request.SetOutput`; both can be used together.
+client.SetOutputDirectory("/Users/jeeva/Downloads")
+
+// HTTP response gets saved into file, similar to curl -o flag
+_, err := client.R().
+ SetOutput("plugin/ReplyWithHeader-v5.1-beta.zip").
+ Get("http://bit.ly/1LouEKr")
+
+// OR using absolute path
+// Note: output directory path is not used for absolute path
+_, err := client.R().
+ SetOutput("/MyDownloads/plugin/ReplyWithHeader-v5.1-beta.zip").
+ Get("http://bit.ly/1LouEKr")
+```
+
+#### Request URL Path Params
+
+Resty provides easy-to-use dynamic request URL path params. Params can be set at the client and request level. Client-level param values can be overridden at the request level.
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+client.R().SetPathParams(map[string]string{
+ "userId": "sample@sample.com",
+ "subAccountId": "100002",
+}).
+Get("/v1/users/{userId}/{subAccountId}/details")
+
+// Result:
+// Composed URL - /v1/users/sample@sample.com/100002/details
+```
+
+#### Request and Response Middleware
+
+Resty provides middleware to manipulate the Request and the Response. It is more flexible than a callback approach.
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Registering Request Middleware
+client.OnBeforeRequest(func(c *resty.Client, req *resty.Request) error {
+ // Now you have access to Client and current Request object
+ // manipulate it as per your need
+
+    return nil // on success, otherwise return an error
+ })
+
+// Registering Response Middleware
+client.OnAfterResponse(func(c *resty.Client, resp *resty.Response) error {
+ // Now you have access to Client and current Response object
+ // manipulate it as per your need
+
+    return nil // on success, otherwise return an error
+ })
+```
+
+#### OnError Hooks
+
+Resty provides OnError hooks that may be called because:
+
+- The client failed to send the request due to connection timeout, TLS handshake failure, etc...
+- The request was retried the maximum amount of times, and still failed.
+
+If there was a response from the server, the original error will be wrapped in `*resty.ResponseError` which contains the last response received.
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+client.OnError(func(req *resty.Request, err error) {
+ if v, ok := err.(*resty.ResponseError); ok {
+ // v.Response contains the last response from the server
+ // v.Err contains the original error
+ }
+ // Log the error, increment a metric, etc...
+})
+```
+
+#### Redirect Policy
+
+Resty provides a few ready-to-use redirect policies, and it also supports combining multiple policies.
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Assign Client Redirect Policy. Create one as per your need
+client.SetRedirectPolicy(resty.FlexibleRedirectPolicy(15))
+
+// Want multiple policies such as redirect count, domain name check, etc.?
+client.SetRedirectPolicy(resty.FlexibleRedirectPolicy(20),
+ resty.DomainCheckRedirectPolicy("host1.com", "host2.org", "host3.net"))
+```
+
+##### Custom Redirect Policy
+
+Implement [RedirectPolicy](redirect.go#L20) interface and register it with resty client. Have a look [redirect.go](redirect.go) for more information.
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Using a raw func with client.SetRedirectPolicy
+client.SetRedirectPolicy(resty.RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
+ // Implement your logic here
+
+    // return nil to continue the redirect chain, otherwise return an error to stop/prevent it
+ return nil
+}))
+
+//---------------------------------------------------
+
+// Using a struct to create a more flexible redirect policy
+type CustomRedirectPolicy struct {
+    // variables go here
+}
+
+func (c *CustomRedirectPolicy) Apply(req *http.Request, via []*http.Request) error {
+ // Implement your logic here
+
+    // return nil to continue the redirect chain, otherwise return an error to stop/prevent it
+ return nil
+}
+
+// Registering in resty
+client.SetRedirectPolicy(&CustomRedirectPolicy{/* initialize variables */})
+```
+
+#### Custom Root Certificates and Client Certificates
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Custom Root certificates, just supply the .pem file.
+// You can add one or more root certificates; they get appended.
+client.SetRootCertificate("/path/to/root/pemFile1.pem")
+client.SetRootCertificate("/path/to/root/pemFile2.pem")
+// ... and so on!
+
+// Adding Client Certificates, you add one or more certificates
+// Sample for creating certificate object
+// Parsing public/private key pair from a pair of files. The files must contain PEM encoded data.
+cert1, err := tls.LoadX509KeyPair("certs/client.pem", "certs/client.key")
+if err != nil {
+ log.Fatalf("ERROR client certificate: %s", err)
+}
+// ...
+
+// You add one or more certificates
+client.SetCertificates(cert1, cert2, cert3)
+```
+
+#### Custom Root Certificates and Client Certificates from string
+
+```go
+// Custom Root certificates from string.
+// You can pass your certificates through env variables as strings;
+// you can add one or more root certificates, and they get appended.
+client.SetRootCertificateFromString("-----BEGIN CERTIFICATE-----content-----END CERTIFICATE-----")
+client.SetRootCertificateFromString("-----BEGIN CERTIFICATE-----content-----END CERTIFICATE-----")
+// ... and so on!
+
+// Adding Client Certificates, you add one or more certificates
+// Sample for creating certificate object
+// Parsing public/private key pair from a pair of files. The files must contain PEM encoded data.
+cert1, err := tls.X509KeyPair([]byte("-----BEGIN CERTIFICATE-----content-----END CERTIFICATE-----"), []byte("-----BEGIN PRIVATE KEY-----content-----END PRIVATE KEY-----"))
+if err != nil {
+ log.Fatalf("ERROR client certificate: %s", err)
+}
+// ...
+
+// You add one or more certificates
+client.SetCertificates(cert1, cert2, cert3)
+```
+
+#### Proxy Settings - Client as well as at Request Level
+
+By default, `Go` supports proxies via the `HTTP_PROXY` environment variable. Resty provides support via `SetProxy` & `RemoveProxy`.
+Choose as per your need.
+
+**Client Level Proxy** settings are applied to all requests
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Setting a Proxy URL and Port
+client.SetProxy("http://proxyserver:8888")
+
+// Want to remove proxy setting
+client.RemoveProxy()
+```
+
+#### Retries
+
+Resty uses [backoff](http://www.awsarchitectureblog.com/2015/03/backoff.html)
+to increase retry intervals after each attempt.
+
+Usage example:
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Retries are configured per client
+client.
+    // Set retry count to non-zero to enable retries
+ SetRetryCount(3).
+ // You can override initial retry wait time.
+ // Default is 100 milliseconds.
+ SetRetryWaitTime(5 * time.Second).
+ // MaxWaitTime can be overridden as well.
+ // Default is 2 seconds.
+ SetRetryMaxWaitTime(20 * time.Second).
+ // SetRetryAfter sets callback to calculate wait time between retries.
+ // Default (nil) implies exponential backoff with jitter
+ SetRetryAfter(func(client *resty.Client, resp *resty.Response) (time.Duration, error) {
+ return 0, errors.New("quota exceeded")
+ })
+```
+
+The above setup will result in resty retrying requests that return a non-nil error up to
+3 times, with the delay increasing after each attempt.
+
+You can optionally provide the client with [custom retry conditions](https://pkg.go.dev/github.com/go-resty/resty/v2#RetryConditionFunc):
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+client.AddRetryCondition(
+ // RetryConditionFunc type is for retry condition function
+ // input: non-nil Response OR request execution error
+ func(r *resty.Response, err error) bool {
+ return r.StatusCode() == http.StatusTooManyRequests
+ },
+)
+```
+
+The above example will make resty retry requests that end with a `429 Too Many Requests`
+status code.
+
+Multiple retry conditions can be added.
+
+It is also possible to use `resty.Backoff(...)` to get arbitrary retry scenarios
+implemented. [Reference](retry_test.go).
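+
+A minimal sketch of `resty.Backoff` usage (assuming the `Retries`, `WaitTime`, and `MaxWaitTime` retry options from retry.go):
+
+```go
+err := resty.Backoff(
+    func() (*resty.Response, error) {
+        return client.R().Get("https://httpbin.org/get")
+    },
+    resty.Retries(3),
+    resty.WaitTime(500*time.Millisecond),
+    resty.MaxWaitTime(5*time.Second),
+)
+```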
+
+#### Allow GET request with Payload
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Allow GET request with Payload. This is disabled by default.
+client.SetAllowGetMethodPayload(true)
+```
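+
+With that enabled, a request can carry a body on GET; a short sketch (the endpoint is hypothetical):
+
+```go
+resp, err := client.R().
+    SetBody(`{"query": "books"}`).
+    Get("https://myapp.com/search")
+```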
+
+#### Wanna Multiple Clients
+
+```go
+// Here you go!
+// Client 1
+client1 := resty.New()
+client1.R().Get("http://httpbin.org")
+// ...
+
+// Client 2
+client2 := resty.New()
+client2.R().Head("http://httpbin.org")
+// ...
+
+// Bend it as per your need!!!
+```
+
+#### Remaining Client Settings & Their Options
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Unique settings at Client level
+//--------------------------------
+// Enable debug mode
+client.SetDebug(true)
+
+// Assign Client TLSClientConfig
+// One can set custom root-certificate. Refer: http://golang.org/pkg/crypto/tls/#example_Dial
+client.SetTLSClientConfig(&tls.Config{ RootCAs: roots })
+
+// or One can disable security check (https)
+client.SetTLSClientConfig(&tls.Config{ InsecureSkipVerify: true })
+
+// Set client timeout as per your need
+client.SetTimeout(1 * time.Minute)
+
+
+// You can override all below settings and options at request level if you want to
+//--------------------------------------------------------------------------------
+// Host URL for all requests, so you can use relative URLs in requests
+client.SetHostURL("http://httpbin.org")
+
+// Headers for all requests
+client.SetHeader("Accept", "application/json")
+client.SetHeaders(map[string]string{
+ "Content-Type": "application/json",
+ "User-Agent": "My custom User Agent String",
+ })
+
+// Cookies for all requests
+client.SetCookie(&http.Cookie{
+ Name:"go-resty",
+ Value:"This is cookie value",
+ Path: "/",
+ Domain: "sample.com",
+ MaxAge: 36000,
+ HttpOnly: true,
+ Secure: false,
+ })
+client.SetCookies(cookies)
+
+// URL query parameters for all requests
+client.SetQueryParam("user_id", "00001")
+client.SetQueryParams(map[string]string{ // sample for those who prefer this manner
+      "api_key": "api-key-here",
+      "api_secret": "api-secret",
+ })
+client.R().SetQueryString("productId=232&template=fresh-sample&cat=resty&source=google&kw=buy a lot more")
+
+// Form data for all requests. Typically used with POST and PUT
+client.SetFormData(map[string]string{
+ "access_token": "BC594900-518B-4F7E-AC75-BD37F019E08F",
+ })
+
+// Basic Auth for all requests
+client.SetBasicAuth("myuser", "mypass")
+
+// Bearer Auth Token for all requests
+client.SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F")
+
+// Enabling Content length value for all requests
+client.SetContentLength(true)
+
+// Registering global Error object structure for JSON/XML requests
+client.SetError(&Error{}) // or client.SetError(Error{})
+```
+
+#### Unix Socket
+
+```go
+unixSocket := "/var/run/my_socket.sock"
+
+// Create Go's http.Transport so we can set it in resty.
+transport := http.Transport{
+ Dial: func(_, _ string) (net.Conn, error) {
+ return net.Dial("unix", unixSocket)
+ },
+}
+
+// Create a Resty Client
+client := resty.New()
+
+// Set the previous transport that we created, set the scheme of the communication to the
+// socket and set the unixSocket as the HostURL.
+client.SetTransport(&transport).SetScheme("http").SetHostURL(unixSocket)
+
+// No need to write the host's URL on the request, just the path.
+client.R().Get("/index.html")
+```
+
+#### Bazel Support
+
+Resty can be built, tested and depended upon via [Bazel](https://bazel.build).
+For example, to run all tests:
+
+```shell
+bazel test :resty_test
+```
+
+#### Mocking http requests using [httpmock](https://github.com/jarcoal/httpmock) library
+
+In order to mock http requests when testing your application, you
+can use the `httpmock` library.
+
+When using the default resty client, you should pass the client to the library as follows:
+
+```go
+// Create a Resty Client
+client := resty.New()
+
+// Get the underlying HTTP Client and set it to Mock
+httpmock.ActivateNonDefault(client.GetClient())
+```
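+
+From there, you can register responders; a hedged sketch using httpmock's stock helpers (the URL and payload are hypothetical):
+
+```go
+defer httpmock.DeactivateAndReset()
+
+httpmock.RegisterResponder("GET", "https://myapp.com/articles",
+    httpmock.NewStringResponder(200, `[{"id": 1}]`))
+
+resp, _ := client.R().Get("https://myapp.com/articles")
+fmt.Println(resp.String()) // [{"id": 1}]
+```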
+
+A more detailed example of mocking resty http requests using Ginkgo can be found [here](https://github.com/jarcoal/httpmock#ginkgo--resty-example).
+
+## Versioning
+
+Resty releases versions according to [Semantic Versioning](http://semver.org)
+
+ * Resty v2 does not use the `gopkg.in` service for library versioning.
+ * Resty has been fully adapted to `go mod` capabilities since the `v1.10.0` release.
+ * The Resty v1 series used `gopkg.in` to provide versioning. `gopkg.in/resty.vX` points to the appropriate tagged version; `X` denotes the version series number and is a stable release for production use, e.g. `gopkg.in/resty.v0`.
+ * Development takes place in the master branch. Although the code in master should always compile and test successfully, it might break APIs. I aim to maintain backwards compatibility, but sometimes APIs and behavior might be changed to fix a bug.
+
+## Contribution
+
+I would welcome your contribution! If you find any improvement or issue you want to fix, feel free to send a pull request; I like pull requests that include test cases for the fix/enhancement. I have done my best to bring pretty good code coverage. Feel free to write tests.
+
+BTW, I'd like to know what you think about `Resty`. Kindly open an issue or send me an email; it'd mean a lot to me.
+
+## Creator
+
+[Jeevanandam M.](https://github.com/jeevatkm) (jeeva@myjeeva.com)
+
+## Core Team
+
+Have a look at the [Members](https://github.com/orgs/go-resty/people) page.
+
+## Contributors
+
+Have a look at the [Contributors](https://github.com/go-resty/resty/graphs/contributors) page.
+
+## License
+
+Resty is released under the MIT license; refer to the [LICENSE](LICENSE) file.
diff --git a/vendor/github.com/go-resty/resty/v2/WORKSPACE b/vendor/github.com/go-resty/resty/v2/WORKSPACE
new file mode 100644
index 0000000..9ef03e9
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/WORKSPACE
@@ -0,0 +1,31 @@
+workspace(name = "resty")
+
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+
+http_archive(
+ name = "io_bazel_rules_go",
+ sha256 = "69de5c704a05ff37862f7e0f5534d4f479418afc21806c887db544a316f3cb6b",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.27.0/rules_go-v0.27.0.tar.gz",
+ "https://github.com/bazelbuild/rules_go/releases/download/v0.27.0/rules_go-v0.27.0.tar.gz",
+ ],
+)
+
+http_archive(
+ name = "bazel_gazelle",
+ sha256 = "62ca106be173579c0a167deb23358fdfe71ffa1e4cfdddf5582af26520f1c66f",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz",
+ "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz",
+ ],
+)
+
+load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
+
+go_rules_dependencies()
+
+go_register_toolchains(version = "1.16")
+
+load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")
+
+gazelle_dependencies()
diff --git a/vendor/github.com/go-resty/resty/v2/client.go b/vendor/github.com/go-resty/resty/v2/client.go
new file mode 100644
index 0000000..1a03efa
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/client.go
@@ -0,0 +1,1115 @@
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "bytes"
+ "compress/gzip"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/json"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "net/http"
+ "net/url"
+ "reflect"
+ "regexp"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ // MethodGet HTTP method
+ MethodGet = "GET"
+
+ // MethodPost HTTP method
+ MethodPost = "POST"
+
+ // MethodPut HTTP method
+ MethodPut = "PUT"
+
+ // MethodDelete HTTP method
+ MethodDelete = "DELETE"
+
+ // MethodPatch HTTP method
+ MethodPatch = "PATCH"
+
+ // MethodHead HTTP method
+ MethodHead = "HEAD"
+
+ // MethodOptions HTTP method
+ MethodOptions = "OPTIONS"
+)
+
+var (
+ hdrUserAgentKey = http.CanonicalHeaderKey("User-Agent")
+ hdrAcceptKey = http.CanonicalHeaderKey("Accept")
+ hdrContentTypeKey = http.CanonicalHeaderKey("Content-Type")
+ hdrContentLengthKey = http.CanonicalHeaderKey("Content-Length")
+ hdrContentEncodingKey = http.CanonicalHeaderKey("Content-Encoding")
+ hdrLocationKey = http.CanonicalHeaderKey("Location")
+
+ plainTextType = "text/plain; charset=utf-8"
+ jsonContentType = "application/json"
+ formContentType = "application/x-www-form-urlencoded"
+
+ jsonCheck = regexp.MustCompile(`(?i:(application|text)/(json|.*\+json|json\-.*)(;|$))`)
+ xmlCheck = regexp.MustCompile(`(?i:(application|text)/(xml|.*\+xml)(;|$))`)
+
+ hdrUserAgentValue = "go-resty/" + Version + " (https://github.com/go-resty/resty)"
+ bufPool = &sync.Pool{New: func() interface{} { return &bytes.Buffer{} }}
+)
+
+type (
+ // RequestMiddleware type is for request middleware, called before a request is sent
+ RequestMiddleware func(*Client, *Request) error
+
+ // ResponseMiddleware type is for response middleware, called after a response has been received
+ ResponseMiddleware func(*Client, *Response) error
+
+ // PreRequestHook type is for the request hook, called right before the request is sent
+ PreRequestHook func(*Client, *http.Request) error
+
+ // RequestLogCallback type is for request logs, called before the request is logged
+ RequestLogCallback func(*RequestLog) error
+
+ // ResponseLogCallback type is for response logs, called before the response is logged
+ ResponseLogCallback func(*ResponseLog) error
+
+ // ErrorHook type is for reacting to request errors, called after all retries were attempted
+ ErrorHook func(*Request, error)
+)
+
+// Client struct is used to create a Resty client with client-level settings;
+// these settings are applicable to all requests raised from the client.
+//
+// Resty also provides options to override most of the client settings
+// at the request level.
+type Client struct {
+ BaseURL string
+ HostURL string // Deprecated: use BaseURL instead. To be removed in v3.0.0 release.
+ QueryParam url.Values
+ FormData url.Values
+ PathParams map[string]string
+ Header http.Header
+ UserInfo *User
+ Token string
+ AuthScheme string
+ Cookies []*http.Cookie
+ Error reflect.Type
+ Debug bool
+ DisableWarn bool
+ AllowGetMethodPayload bool
+ RetryCount int
+ RetryWaitTime time.Duration
+ RetryMaxWaitTime time.Duration
+ RetryConditions []RetryConditionFunc
+ RetryHooks []OnRetryFunc
+ RetryAfter RetryAfterFunc
+ JSONMarshal func(v interface{}) ([]byte, error)
+ JSONUnmarshal func(data []byte, v interface{}) error
+ XMLMarshal func(v interface{}) ([]byte, error)
+ XMLUnmarshal func(data []byte, v interface{}) error
+
+ // HeaderAuthorizationKey is used to set/access Request Authorization header
+ // value when `SetAuthToken` option is used.
+ HeaderAuthorizationKey string
+
+ jsonEscapeHTML bool
+ setContentLength bool
+ closeConnection bool
+ notParseResponse bool
+ trace bool
+ debugBodySizeLimit int64
+ outputDirectory string
+ scheme string
+ log Logger
+ httpClient *http.Client
+ proxyURL *url.URL
+ beforeRequest []RequestMiddleware
+ udBeforeRequest []RequestMiddleware
+ preReqHook PreRequestHook
+ afterResponse []ResponseMiddleware
+ requestLog RequestLogCallback
+ responseLog ResponseLogCallback
+ errorHooks []ErrorHook
+}
+
+// User type is to hold an username and password information
+type User struct {
+ Username, Password string
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Client methods
+//___________________________________
+
+// SetHostURL method sets the Host URL in the client instance. It will be used with requests
+// raised from this client with a relative URL.
+// // Setting HTTP address
+// client.SetHostURL("http://myjeeva.com")
+//
+// // Setting HTTPS address
+// client.SetHostURL("https://myjeeva.com")
+//
+// Deprecated: use SetBaseURL instead. To be removed in v3.0.0 release.
+func (c *Client) SetHostURL(url string) *Client {
+ c.SetBaseURL(url)
+ return c
+}
+
+// SetBaseURL method sets the Base URL in the client instance. It will be used with requests
+// raised from this client with a relative URL.
+// // Setting HTTP address
+// client.SetBaseURL("http://myjeeva.com")
+//
+// // Setting HTTPS address
+// client.SetBaseURL("https://myjeeva.com")
+//
+// Since v2.7.0
+func (c *Client) SetBaseURL(url string) *Client {
+ c.BaseURL = strings.TrimRight(url, "/")
+ c.HostURL = c.BaseURL
+ return c
+}
+
+// SetHeader method sets a single header field and its value in the client instance.
+// These headers will be applied to all requests raised from this client instance.
+// They can also be overridden at the request level via the header options.
+//
+// See `Request.SetHeader` or `Request.SetHeaders`.
+//
+// For Example: To set `Content-Type` and `Accept` as `application/json`
+//
+// client.
+// SetHeader("Content-Type", "application/json").
+// SetHeader("Accept", "application/json")
+func (c *Client) SetHeader(header, value string) *Client {
+ c.Header.Set(header, value)
+ return c
+}
+
+// SetHeaders method sets multiple header fields and their values at one go in the client instance.
+// These headers will be applied to all requests raised from this client instance. They can also be
+// overridden at the request level via the headers options.
+//
+// See `Request.SetHeaders` or `Request.SetHeader`.
+//
+// For Example: To set `Content-Type` and `Accept` as `application/json`
+//
+// client.SetHeaders(map[string]string{
+// "Content-Type": "application/json",
+// "Accept": "application/json",
+// })
+func (c *Client) SetHeaders(headers map[string]string) *Client {
+ for h, v := range headers {
+ c.Header.Set(h, v)
+ }
+ return c
+}
+
+// SetHeaderVerbatim method is to set a single header field and its value verbatim in the client instance.
+//
+// For Example: To set `all_lowercase` and `UPPERCASE` as `available`.
+// client.
+// SetHeaderVerbatim("all_lowercase", "available").
+// SetHeaderVerbatim("UPPERCASE", "available")
+//
+// Headers set this way apply to all requests raised from this client instance and
+// can be overridden at the request level; see `Request.SetHeaderVerbatim`.
+//
+// Since v2.6.0
+func (c *Client) SetHeaderVerbatim(header, value string) *Client {
+ c.Header[header] = []string{value}
+ return c
+}
+
+// SetCookieJar method sets a custom http.CookieJar in the Resty client. It's a way to override the default.
+//
+// For Example: sometimes we don't want to save cookies while contacting an API, so we can remove
+// the default CookieJar in the Resty client.
+//
+// client.SetCookieJar(nil)
+func (c *Client) SetCookieJar(jar http.CookieJar) *Client {
+ c.httpClient.Jar = jar
+ return c
+}
+
+// SetCookie method appends a single cookie in the client instance.
+// These cookies will be added to all the requests raised from this client instance.
+// client.SetCookie(&http.Cookie{
+// Name:"go-resty",
+// Value:"This is cookie value",
+// })
+func (c *Client) SetCookie(hc *http.Cookie) *Client {
+ c.Cookies = append(c.Cookies, hc)
+ return c
+}
+
+// SetCookies method sets an array of cookies in the client instance.
+// These cookies will be added to all the requests raised from this client instance.
+// cookies := []*http.Cookie{
+// &http.Cookie{
+// Name:"go-resty-1",
+// Value:"This is cookie 1 value",
+// },
+// &http.Cookie{
+// Name:"go-resty-2",
+// Value:"This is cookie 2 value",
+// },
+// }
+//
+// // Setting cookies into resty
+// client.SetCookies(cookies)
+func (c *Client) SetCookies(cs []*http.Cookie) *Client {
+ c.Cookies = append(c.Cookies, cs...)
+ return c
+}
+
+// SetQueryParam method sets a single parameter and its value in the client instance.
+// It will be formed as a query string for the request.
+//
+// For Example: `search=kitchen%20papers&size=large`
+// in the URL after the `?` mark. These query params will be added to all the requests raised from
+// this client instance. They can also be overridden at the request level Query Param options.
+//
+// See `Request.SetQueryParam` or `Request.SetQueryParams`.
+// client.
+// SetQueryParam("search", "kitchen papers").
+// SetQueryParam("size", "large")
+func (c *Client) SetQueryParam(param, value string) *Client {
+ c.QueryParam.Set(param, value)
+ return c
+}
+
+// SetQueryParams method sets multiple parameters and their values at one go in the client instance.
+// It will be formed as a query string for the request.
+//
+// For Example: `search=kitchen%20papers&size=large`
+// in the URL after the `?` mark. These query params will be added to all the requests raised from this
+// client instance. They can also be overridden at the request level Query Param options.
+//
+// See `Request.SetQueryParams` or `Request.SetQueryParam`.
+// client.SetQueryParams(map[string]string{
+// "search": "kitchen papers",
+// "size": "large",
+// })
+func (c *Client) SetQueryParams(params map[string]string) *Client {
+ for p, v := range params {
+ c.SetQueryParam(p, v)
+ }
+ return c
+}
+
+// SetFormData method sets Form parameters and their values in the client instance.
+// It's applicable only to HTTP methods `POST` and `PUT`, and the request content type will be set to
+// `application/x-www-form-urlencoded`. This form data will be added to all the requests raised from
+// this client instance. It can also be overridden at the request level form data.
+//
+// See `Request.SetFormData`.
+// client.SetFormData(map[string]string{
+// "access_token": "BC594900-518B-4F7E-AC75-BD37F019E08F",
+// "user_id": "3455454545",
+// })
+func (c *Client) SetFormData(data map[string]string) *Client {
+ for k, v := range data {
+ c.FormData.Set(k, v)
+ }
+ return c
+}
+
+// SetBasicAuth method sets the basic authentication header in the HTTP request. For Example:
+// Authorization: Basic <base64-encoded-value>
+//
+// For Example: To set the header for username "go-resty" and password "welcome"
+// client.SetBasicAuth("go-resty", "welcome")
+//
+// This basic auth information gets added to all the requests raised from this client instance.
+// It can also be overridden or set at the request level.
+//
+// See `Request.SetBasicAuth`.
+func (c *Client) SetBasicAuth(username, password string) *Client {
+ c.UserInfo = &User{Username: username, Password: password}
+ return c
+}
+
+// SetAuthToken method sets the auth token of the `Authorization` header for all HTTP requests.
+// The default auth scheme is `Bearer`; it can be customized with the method `SetAuthScheme`. For Example:
+// Authorization: <auth-scheme> <auth-token-value>
+//
+// For Example: To set auth token BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F
+//
+// client.SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F")
+//
+// This auth token gets added to all the requests raised from this client instance.
+// It can also be overridden or set at the request level.
+//
+// See `Request.SetAuthToken`.
+func (c *Client) SetAuthToken(token string) *Client {
+ c.Token = token
+ return c
+}
+
+// SetAuthScheme method sets the auth scheme type in the HTTP request. For Example:
+// Authorization: <auth-scheme-value> <auth-token-value>
+//
+// For Example: To set the scheme to use OAuth
+//
+// client.SetAuthScheme("OAuth")
+//
+// This auth scheme gets added to all the requests raised from this client instance.
+// It can also be overridden or set at the request level.
+//
+// Information about auth schemes can be found in RFC 7235, which is linked below,
+// along with the page containing the currently defined official authentication schemes:
+// https://tools.ietf.org/html/rfc7235
+// https://www.iana.org/assignments/http-authschemes/http-authschemes.xhtml#authschemes
+//
+// See `Request.SetAuthScheme`.
+func (c *Client) SetAuthScheme(scheme string) *Client {
+ c.AuthScheme = scheme
+ return c
+}
+
+// R method creates a new request instance; it's used for Get, Post, Put, Delete, Patch, Head, Options, etc.
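+//
+// An illustrative usage sketch (the URL is a placeholder):
+// resp, err := client.R().
+// 	SetQueryParam("limit", "10").
+// 	Get("https://httpbin.org/get")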
+func (c *Client) R() *Request {
+ r := &Request{
+ QueryParam: url.Values{},
+ FormData: url.Values{},
+ Header: http.Header{},
+ Cookies: make([]*http.Cookie, 0),
+
+ client: c,
+ multipartFiles: []*File{},
+ multipartFields: []*MultipartField{},
+ PathParams: map[string]string{},
+ jsonEscapeHTML: true,
+ }
+ return r
+}
+
+// NewRequest is an alias for method `R()`. It creates a new request instance; it's used for
+// Get, Post, Put, Delete, Patch, Head, Options, etc.
+func (c *Client) NewRequest() *Request {
+ return c.R()
+}
+
+// OnBeforeRequest method appends request middleware into the before-request chain.
+// It gets applied after the default Resty request middlewares and before the request
+// is sent from Resty to the host server.
+// client.OnBeforeRequest(func(c *resty.Client, r *resty.Request) error {
+// // Now you have access to Client and Request instance
+// // manipulate it as per your need
+//
+// return nil // if it's successful, otherwise return an error
+// })
+func (c *Client) OnBeforeRequest(m RequestMiddleware) *Client {
+ c.udBeforeRequest = append(c.udBeforeRequest, m)
+ return c
+}
+
+// OnAfterResponse method appends response middleware into the after-response chain.
+// Once we receive a response from the host server, the default Resty response middlewares
+// get applied, and then the user-assigned response middlewares are applied.
+// client.OnAfterResponse(func(c *resty.Client, r *resty.Response) error {
+// // Now you have access to Client and Response instance
+// // manipulate it as per your need
+//
+// return nil // if it's successful, otherwise return an error
+// })
+func (c *Client) OnAfterResponse(m ResponseMiddleware) *Client {
+ c.afterResponse = append(c.afterResponse, m)
+ return c
+}
+
+// OnError method adds a callback that will be run whenever a request execution fails.
+// This is called after all retries have been attempted (if any).
+// If there was a response from the server, the error will be wrapped in *ResponseError
+// which has the last response received from the server.
+//
+// client.OnError(func(req *resty.Request, err error) {
+// if v, ok := err.(*resty.ResponseError); ok {
+// // Do something with v.Response
+// }
+// // Log the error, increment a metric, etc...
+// })
+func (c *Client) OnError(h ErrorHook) *Client {
+ c.errorHooks = append(c.errorHooks, h)
+ return c
+}
+
+// SetPreRequestHook method sets the given pre-request function into the Resty client.
+// It is called right before the request is fired.
+//
+// Note: Only one pre-request hook can be registered. Use `client.OnBeforeRequest` for multiple.
+func (c *Client) SetPreRequestHook(h PreRequestHook) *Client {
+ if c.preReqHook != nil {
+ c.log.Warnf("Overwriting an existing pre-request hook: %s", functionName(h))
+ }
+ c.preReqHook = h
+ return c
+}
+
+// SetDebug method enables the debug mode on the Resty client. The client logs details of every request and response.
+// For `Request` it logs information such as HTTP verb, relative URL path, host, headers, and body if it has one.
+// For `Response` it logs information such as status, response time, headers, and body if it has one.
+// client.SetDebug(true)
+func (c *Client) SetDebug(d bool) *Client {
+ c.Debug = d
+ return c
+}
+
+// SetDebugBodyLimit sets the maximum size for which the response and request body will be logged in debug mode.
+// client.SetDebugBodyLimit(1000000)
+func (c *Client) SetDebugBodyLimit(sl int64) *Client {
+ c.debugBodySizeLimit = sl
+ return c
+}
+
+// OnRequestLog method is used to set the request log callback into Resty. The registered callback
+// gets called before Resty actually logs the information.
+func (c *Client) OnRequestLog(rl RequestLogCallback) *Client {
+ if c.requestLog != nil {
+ c.log.Warnf("Overwriting an existing on-request-log callback from=%s to=%s",
+ functionName(c.requestLog), functionName(rl))
+ }
+ c.requestLog = rl
+ return c
+}
+
+// OnResponseLog method is used to set the response log callback into Resty. The registered callback
+// gets called before Resty actually logs the information.
+func (c *Client) OnResponseLog(rl ResponseLogCallback) *Client {
+ if c.responseLog != nil {
+ c.log.Warnf("Overwriting an existing on-response-log callback from=%s to=%s",
+ functionName(c.responseLog), functionName(rl))
+ }
+ c.responseLog = rl
+ return c
+}
+
+// SetDisableWarn method disables the warning message on the Resty client.
+//
+// For Example: Resty warns the user when BasicAuth is used in non-TLS mode.
+// client.SetDisableWarn(true)
+func (c *Client) SetDisableWarn(d bool) *Client {
+ c.DisableWarn = d
+ return c
+}
+
+// SetAllowGetMethodPayload method allows the GET method with a payload on the Resty client.
+//
+// For Example: Resty allows the user to send a request with a payload using the HTTP GET method.
+// client.SetAllowGetMethodPayload(true)
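+//
+// An illustrative sketch (URL and body are placeholders):
+// resp, err := client.R().
+// 	SetBody(`{"query": "filter"}`).
+// 	Get("https://httpbin.org/get")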
+func (c *Client) SetAllowGetMethodPayload(a bool) *Client {
+ c.AllowGetMethodPayload = a
+ return c
+}
+
+// SetLogger method sets the given logger for logging Resty request and response details.
+//
+// It must be compliant with the `resty.Logger` interface.
+func (c *Client) SetLogger(l Logger) *Client {
+ c.log = l
+ return c
+}
+
+// SetContentLength method enables the HTTP header `Content-Length` value for every request.
+// By default Resty won't set `Content-Length`.
+// client.SetContentLength(true)
+//
+// Also, you have an option to enable it for a particular request. See `Request.SetContentLength`
+func (c *Client) SetContentLength(l bool) *Client {
+ c.setContentLength = l
+ return c
+}
+
+// SetTimeout method sets the timeout for requests raised from the client.
+// client.SetTimeout(1 * time.Minute)
+func (c *Client) SetTimeout(timeout time.Duration) *Client {
+ c.httpClient.Timeout = timeout
+ return c
+}
+
+// SetError method is to register the global or client-common `Error` object into Resty.
+// It is used for automatic unmarshalling if the response status code is greater than 399 and the
+// content type is either JSON or XML. It can be a pointer or non-pointer.
+// client.SetError(&Error{})
+// // OR
+// client.SetError(Error{})
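+//
+// The unmarshalled error can later be read back from the response, e.g. via
+// `resp.Error().(*Error)` when a pointer type was registered (illustrative sketch).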
+func (c *Client) SetError(err interface{}) *Client {
+ c.Error = typeOf(err)
+ return c
+}
+
+// SetRedirectPolicy method sets the client redirect policy. Resty provides ready-to-use
+// redirect policies. If you want to create one yourself, refer to `redirect.go`.
+//
+// client.SetRedirectPolicy(FlexibleRedirectPolicy(20))
+//
+// // Need multiple redirect policies together
+// client.SetRedirectPolicy(FlexibleRedirectPolicy(20), DomainCheckRedirectPolicy("host1.com", "host2.net"))
+func (c *Client) SetRedirectPolicy(policies ...interface{}) *Client {
+ for _, p := range policies {
+ if _, ok := p.(RedirectPolicy); !ok {
+ c.log.Errorf("%v does not implement resty.RedirectPolicy (missing Apply method)",
+ functionName(p))
+ }
+ }
+
+ c.httpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
+ for _, p := range policies {
+ if err := p.(RedirectPolicy).Apply(req, via); err != nil {
+ return err
+ }
+ }
+ return nil // looks good, go ahead
+ }
+
+ return c
+}
+
+// SetRetryCount method enables retry on the Resty client and allows you
+// to set the number of retries. Resty uses a backoff mechanism.
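+//
+// An illustrative configuration sketch (values are placeholders, not defaults):
+// client.
+// 	SetRetryCount(3).
+// 	SetRetryWaitTime(100 * time.Millisecond).
+// 	SetRetryMaxWaitTime(2 * time.Second)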
+func (c *Client) SetRetryCount(count int) *Client {
+ c.RetryCount = count
+ return c
+}
+
+// SetRetryWaitTime method sets the default wait time to sleep before retrying
+// a request.
+//
+// Default is 100 milliseconds.
+func (c *Client) SetRetryWaitTime(waitTime time.Duration) *Client {
+ c.RetryWaitTime = waitTime
+ return c
+}
+
+// SetRetryMaxWaitTime method sets the maximum wait time to sleep before retrying
+// a request.
+//
+// Default is 2 seconds.
+func (c *Client) SetRetryMaxWaitTime(maxWaitTime time.Duration) *Client {
+ c.RetryMaxWaitTime = maxWaitTime
+ return c
+}
+
+// SetRetryAfter sets the callback to calculate the wait time between retries.
+// Default (nil) implies exponential backoff with jitter.
+func (c *Client) SetRetryAfter(callback RetryAfterFunc) *Client {
+ c.RetryAfter = callback
+ return c
+}
+
+// AddRetryCondition method adds a retry condition function to the array of functions
+// that are checked to determine if the request should be retried. The request will
+// be retried if any of the functions return true and the error is nil.
+//
+// Note: These retry conditions are applied to all requests made using this Client.
+// For request-specific retry conditions, check *Request.AddRetryCondition
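+//
+// An illustrative sketch that retries on HTTP 429 (the status code choice is just an example):
+// client.AddRetryCondition(func(r *resty.Response, err error) bool {
+// 	return r.StatusCode() == http.StatusTooManyRequests
+// })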
+func (c *Client) AddRetryCondition(condition RetryConditionFunc) *Client {
+ c.RetryConditions = append(c.RetryConditions, condition)
+ return c
+}
+
+// AddRetryAfterErrorCondition adds the basic condition of retrying after encountering
+// an error from the HTTP response
+//
+// Since v2.6.0
+func (c *Client) AddRetryAfterErrorCondition() *Client {
+ c.AddRetryCondition(func(response *Response, err error) bool {
+ return response.IsError()
+ })
+ return c
+}
+
+// AddRetryHook adds a side-effecting retry hook to the array of hooks
+// that will be executed on each retry.
+//
+// Since v2.6.0
+func (c *Client) AddRetryHook(hook OnRetryFunc) *Client {
+ c.RetryHooks = append(c.RetryHooks, hook)
+ return c
+}
+
+// SetTLSClientConfig method sets TLSClientConfig for the underlying client Transport.
+//
+// For Example:
+// // One can set custom root-certificate. Refer: http://golang.org/pkg/crypto/tls/#example_Dial
+// client.SetTLSClientConfig(&tls.Config{ RootCAs: roots })
+//
+// // or one can disable the security check (https)
+// client.SetTLSClientConfig(&tls.Config{ InsecureSkipVerify: true })
+//
+// Note: This method overwrites existing `TLSClientConfig`.
+func (c *Client) SetTLSClientConfig(config *tls.Config) *Client {
+ transport, err := c.transport()
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+ transport.TLSClientConfig = config
+ return c
+}
+
+// SetProxy method sets the proxy URL and port for the Resty client.
+// client.SetProxy("http://proxyserver:8888")
+//
+// Alternatively, without this `SetProxy` method, you can also set the proxy via environment variables.
+//
+// Refer to godoc `http.ProxyFromEnvironment`.
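+//
+// For instance, via environment variables picked up by `http.ProxyFromEnvironment`
+// (values are placeholders):
+// export HTTP_PROXY="http://proxyserver:8888"
+// export HTTPS_PROXY="http://proxyserver:8888"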
+func (c *Client) SetProxy(proxyURL string) *Client {
+ transport, err := c.transport()
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+
+ pURL, err := url.Parse(proxyURL)
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+
+ c.proxyURL = pURL
+ transport.Proxy = http.ProxyURL(c.proxyURL)
+ return c
+}
+
+// RemoveProxy method removes the proxy configuration from the Resty client
+// client.RemoveProxy()
+func (c *Client) RemoveProxy() *Client {
+ transport, err := c.transport()
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+ c.proxyURL = nil
+ transport.Proxy = nil
+ return c
+}
+
+// SetCertificates method helps to set client certificates into Resty conveniently.
+func (c *Client) SetCertificates(certs ...tls.Certificate) *Client {
+ config, err := c.tlsConfig()
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+ config.Certificates = append(config.Certificates, certs...)
+ return c
+}
+
+// SetRootCertificate method helps to add one or more root certificates into the Resty client
+// client.SetRootCertificate("/path/to/root/pemFile.pem")
+func (c *Client) SetRootCertificate(pemFilePath string) *Client {
+ rootPemData, err := ioutil.ReadFile(pemFilePath)
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+
+ config, err := c.tlsConfig()
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+ if config.RootCAs == nil {
+ config.RootCAs = x509.NewCertPool()
+ }
+
+ config.RootCAs.AppendCertsFromPEM(rootPemData)
+ return c
+}
+
+// SetRootCertificateFromString method helps to add one or more root certificates into the Resty client
+// client.SetRootCertificateFromString("pem file content")
+func (c *Client) SetRootCertificateFromString(pemContent string) *Client {
+ config, err := c.tlsConfig()
+ if err != nil {
+ c.log.Errorf("%v", err)
+ return c
+ }
+ if config.RootCAs == nil {
+ config.RootCAs = x509.NewCertPool()
+ }
+
+ config.RootCAs.AppendCertsFromPEM([]byte(pemContent))
+ return c
+}
+
+// SetOutputDirectory method sets the output directory for saving HTTP responses into files.
+// If the output directory does not exist, Resty creates one. This setting is optional,
+// and it can be used together with an absolute path in `Request.SetOutput`.
+// client.SetOutputDirectory("/save/http/response/here")
+func (c *Client) SetOutputDirectory(dirPath string) *Client {
+ c.outputDirectory = dirPath
+ return c
+}
+
+// SetTransport method sets a custom `*http.Transport` or any `http.RoundTripper`
+// compatible interface implementation in the resty client.
+//
+// Note:
+//
+// - If the transport is not of type `*http.Transport`, then you may not be able to
+// take advantage of some of the Resty client settings.
+//
+// - It overwrites the Resty client transport instance and its configurations.
+//
+// transport := &http.Transport{
+// // something like proxying to httptest.Server, etc...
+// Proxy: func(req *http.Request) (*url.URL, error) {
+// return url.Parse(server.URL)
+// },
+// }
+//
+// client.SetTransport(transport)
+func (c *Client) SetTransport(transport http.RoundTripper) *Client {
+ if transport != nil {
+ c.httpClient.Transport = transport
+ }
+ return c
+}
+
+// SetScheme method sets a custom scheme in the Resty client. It's a way to override the default.
+// client.SetScheme("http")
+func (c *Client) SetScheme(scheme string) *Client {
+ if !IsStringEmpty(scheme) {
+ c.scheme = strings.TrimSpace(scheme)
+ }
+ return c
+}
+
+// SetCloseConnection method sets the `Close` field in the http request struct with the given
+// value. More info: https://golang.org/src/net/http/request.go
+func (c *Client) SetCloseConnection(close bool) *Client {
+ c.closeConnection = close
+ return c
+}
+
+// SetDoNotParseResponse method instructs `Resty` not to parse the response body automatically.
+// Resty exposes the raw response body as an `io.ReadCloser`. Also, do not forget to close the body,
+// otherwise you might run into connection leaks and lose connection reuse.
+//
+// Note: Response middlewares are not applicable if you use this option. Basically you have
+// taken over control of response parsing from `Resty`.
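+//
+// An illustrative sketch of consuming the raw body (URL is a placeholder, errors elided):
+// client.SetDoNotParseResponse(true)
+// resp, _ := client.R().Get("https://httpbin.org/get")
+// defer resp.RawBody().Close()
+// data, _ := ioutil.ReadAll(resp.RawBody())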
+func (c *Client) SetDoNotParseResponse(parse bool) *Client {
+ c.notParseResponse = parse
+ return c
+}
+
+// SetPathParam method sets a single URL path key-value pair in the
+// Resty client instance.
+// client.SetPathParam("userId", "sample@sample.com")
+//
+// Result:
+// URL - /v1/users/{userId}/details
+// Composed URL - /v1/users/sample@sample.com/details
+// It replaces the value of the key while composing the request URL.
+//
+// It can also be overridden at the request level Path Params options;
+// see `Request.SetPathParam` or `Request.SetPathParams`.
+func (c *Client) SetPathParam(param, value string) *Client {
+ c.PathParams[param] = value
+ return c
+}
+
+// SetPathParams method sets multiple URL path key-value pairs at one go in the
+// Resty client instance.
+// client.SetPathParams(map[string]string{
+// "userId": "sample@sample.com",
+// "subAccountId": "100002",
+// })
+//
+// Result:
+// URL - /v1/users/{userId}/{subAccountId}/details
+// Composed URL - /v1/users/sample@sample.com/100002/details
+// It replaces the value of the key while composing the request URL.
+//
+// It can also be overridden at the request level Path Params options;
+// see `Request.SetPathParam` or `Request.SetPathParams`.
+func (c *Client) SetPathParams(params map[string]string) *Client {
+ for p, v := range params {
+ c.SetPathParam(p, v)
+ }
+ return c
+}
+
+// SetJSONEscapeHTML method is to enable/disable HTML escaping on JSON marshalling.
+//
+// Note: This option is only applicable to the standard JSON marshaller.
+func (c *Client) SetJSONEscapeHTML(b bool) *Client {
+ c.jsonEscapeHTML = b
+ return c
+}
+
+// EnableTrace method enables the Resty client trace for the requests fired from
+// the client using `httptrace.ClientTrace` and provides insights.
+//
+// client := resty.New().EnableTrace()
+//
+// resp, err := client.R().Get("https://httpbin.org/get")
+// fmt.Println("Error:", err)
+// fmt.Println("Trace Info:", resp.Request.TraceInfo())
+//
+// Also, `Request.EnableTrace` is available to get trace info for a single request.
+//
+// Since v2.0.0
+func (c *Client) EnableTrace() *Client {
+ c.trace = true
+ return c
+}
+
+// DisableTrace method disables the Resty client trace. Refer to `Client.EnableTrace`.
+//
+// Since v2.0.0
+func (c *Client) DisableTrace() *Client {
+ c.trace = false
+ return c
+}
+
+// IsProxySet method returns true if the proxy is set on the Resty client, otherwise
+// false. By default the proxy is set from the environment; refer to `http.ProxyFromEnvironment`.
+func (c *Client) IsProxySet() bool {
+ return c.proxyURL != nil
+}
+
+// GetClient method returns the current `http.Client` used by the resty client.
+func (c *Client) GetClient() *http.Client {
+ return c.httpClient
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Client Unexported methods
+//_______________________________________________________________________
+
+// execute method executes the given `Request` object and returns the response
+// or an error.
+func (c *Client) execute(req *Request) (*Response, error) {
+ // Apply Request middleware
+ var err error
+
+ // user defined on before request methods
+ // to modify the *resty.Request object
+ for _, f := range c.udBeforeRequest {
+ if err = f(c, req); err != nil {
+ return nil, wrapNoRetryErr(err)
+ }
+ }
+
+ // resty middlewares
+ for _, f := range c.beforeRequest {
+ if err = f(c, req); err != nil {
+ return nil, wrapNoRetryErr(err)
+ }
+ }
+
+ if hostHeader := req.Header.Get("Host"); hostHeader != "" {
+ req.RawRequest.Host = hostHeader
+ }
+
+ // call pre-request if defined
+ if c.preReqHook != nil {
+ if err = c.preReqHook(c, req.RawRequest); err != nil {
+ return nil, wrapNoRetryErr(err)
+ }
+ }
+
+ if err = requestLogger(c, req); err != nil {
+ return nil, wrapNoRetryErr(err)
+ }
+
+ req.RawRequest.Body = newRequestBodyReleaser(req.RawRequest.Body, req.bodyBuf)
+
+ req.Time = time.Now()
+ resp, err := c.httpClient.Do(req.RawRequest)
+
+ response := &Response{
+ Request: req,
+ RawResponse: resp,
+ }
+
+ if err != nil || req.notParseResponse || c.notParseResponse {
+ response.setReceivedAt()
+ return response, err
+ }
+
+ if !req.isSaveResponse {
+ defer closeq(resp.Body)
+ body := resp.Body
+
+ // GitHub #142 & #187
+ if strings.EqualFold(resp.Header.Get(hdrContentEncodingKey), "gzip") && resp.ContentLength != 0 {
+ if _, ok := body.(*gzip.Reader); !ok {
+ body, err = gzip.NewReader(body)
+ if err != nil {
+ response.setReceivedAt()
+ return response, err
+ }
+ defer closeq(body)
+ }
+ }
+
+ if response.body, err = ioutil.ReadAll(body); err != nil {
+ response.setReceivedAt()
+ return response, err
+ }
+
+ response.size = int64(len(response.body))
+ }
+
+ response.setReceivedAt() // after we read the body
+
+ // Apply Response middleware
+ for _, f := range c.afterResponse {
+ if err = f(c, response); err != nil {
+ break
+ }
+ }
+
+ return response, wrapNoRetryErr(err)
+}
+
+// tlsConfig returns the TLS client config; if it does not exist, one is created
+func (c *Client) tlsConfig() (*tls.Config, error) {
+ transport, err := c.transport()
+ if err != nil {
+ return nil, err
+ }
+ if transport.TLSClientConfig == nil {
+ transport.TLSClientConfig = &tls.Config{}
+ }
+ return transport.TLSClientConfig, nil
+}
+
+// transport method returns the `*http.Transport` currently in use, or an error
+// in case the currently used `transport` is not a `*http.Transport`.
+func (c *Client) transport() (*http.Transport, error) {
+ if transport, ok := c.httpClient.Transport.(*http.Transport); ok {
+ return transport, nil
+ }
+ return nil, errors.New("current transport is not an *http.Transport instance")
+}
+
+// just an internal helper method
+func (c *Client) outputLogTo(w io.Writer) *Client {
+ c.log.(*logger).l.SetOutput(w)
+ return c
+}
+
+// ResponseError is a wrapper for including the server response with an error.
+// Neither the err nor the response should be nil.
+type ResponseError struct {
+ Response *Response
+ Err error
+}
+
+func (e *ResponseError) Error() string {
+ return e.Err.Error()
+}
+
+func (e *ResponseError) Unwrap() error {
+ return e.Err
+}
+
+// Helper to run the registered error hooks.
+// It wraps the error in a ResponseError if the resp is not nil
+// so hooks can access it.
+func (c *Client) onErrorHooks(req *Request, resp *Response, err error) {
+ if err != nil {
+ if resp != nil { // wrap with ResponseError
+ err = &ResponseError{Response: resp, Err: err}
+ }
+ for _, h := range c.errorHooks {
+ h(req, err)
+ }
+ }
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// File struct and its methods
+//_______________________________________________________________________
+
+// File struct represents file information for a multipart request
+type File struct {
+ Name string
+ ParamName string
+ io.Reader
+}
+
+// String method returns the string value of the current file details
+func (f *File) String() string {
+ return fmt.Sprintf("ParamName: %v; FileName: %v", f.ParamName, f.Name)
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// MultipartField struct
+//_______________________________________________________________________
+
+// MultipartField struct represents a custom data part for a multipart request
+type MultipartField struct {
+ Param string
+ FileName string
+ ContentType string
+ io.Reader
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Unexported package methods
+//_______________________________________________________________________
+
+func createClient(hc *http.Client) *Client {
+ if hc.Transport == nil {
+ hc.Transport = createTransport(nil)
+ }
+
+ c := &Client{ // not setting lang default values
+ QueryParam: url.Values{},
+ FormData: url.Values{},
+ Header: http.Header{},
+ Cookies: make([]*http.Cookie, 0),
+ RetryWaitTime: defaultWaitTime,
+ RetryMaxWaitTime: defaultMaxWaitTime,
+ PathParams: make(map[string]string),
+ JSONMarshal: json.Marshal,
+ JSONUnmarshal: json.Unmarshal,
+ XMLMarshal: xml.Marshal,
+ XMLUnmarshal: xml.Unmarshal,
+ HeaderAuthorizationKey: http.CanonicalHeaderKey("Authorization"),
+
+ jsonEscapeHTML: true,
+ httpClient: hc,
+ debugBodySizeLimit: math.MaxInt32,
+ }
+
+ // Logger
+ c.SetLogger(createLogger())
+
+ // default before request middlewares
+ c.beforeRequest = []RequestMiddleware{
+ parseRequestURL,
+ parseRequestHeader,
+ parseRequestBody,
+ createHTTPRequest,
+ addCredentials,
+ }
+
+ // user defined request middlewares
+ c.udBeforeRequest = []RequestMiddleware{}
+
+ // default after response middlewares
+ c.afterResponse = []ResponseMiddleware{
+ responseLogger,
+ parseResponseBody,
+ saveResponseIntoFile,
+ }
+
+ return c
+}
diff --git a/vendor/github.com/go-resty/resty/v2/go.mod b/vendor/github.com/go-resty/resty/v2/go.mod
new file mode 100644
index 0000000..5e78bdc
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/go.mod
@@ -0,0 +1,5 @@
+module github.com/go-resty/resty/v2
+
+require golang.org/x/net v0.0.0-20211029224645-99673261e6eb
+
+go 1.11
diff --git a/vendor/github.com/go-resty/resty/v2/go.sum b/vendor/github.com/go-resty/resty/v2/go.sum
new file mode 100644
index 0000000..07a4515
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/go.sum
@@ -0,0 +1,7 @@
+golang.org/x/net v0.0.0-20211029224645-99673261e6eb h1:pirldcYWx7rx7kE5r+9WsOXPXK0+WH5+uZ7uPmJ44uM=
+golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
diff --git a/vendor/github.com/go-resty/resty/v2/middleware.go b/vendor/github.com/go-resty/resty/v2/middleware.go
new file mode 100644
index 0000000..0e8ac2b
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/middleware.go
@@ -0,0 +1,543 @@
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime/multipart"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "time"
+)
+
+const debugRequestLogKey = "__restyDebugRequestLog"
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Request Middleware(s)
+//_______________________________________________________________________
+
+func parseRequestURL(c *Client, r *Request) error {
+ // GitHub #103 Path Params
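+ // e.g. URL "/v1/users/{userId}" with PathParams {"userId": "jeeva"}
+ // composes to "/v1/users/jeeva" (values are URL path-escaped)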
+ if len(r.PathParams) > 0 {
+ for p, v := range r.PathParams {
+ r.URL = strings.Replace(r.URL, "{"+p+"}", url.PathEscape(v), -1)
+ }
+ }
+ if len(c.PathParams) > 0 {
+ for p, v := range c.PathParams {
+ r.URL = strings.Replace(r.URL, "{"+p+"}", url.PathEscape(v), -1)
+ }
+ }
+
+ // Parsing request URL
+ reqURL, err := url.Parse(r.URL)
+ if err != nil {
+ return err
+ }
+
+ // If Request.URL is a relative path, then c.HostURL is prepended to
+ // the request URL; otherwise Request.URL is used as-is
+ if !reqURL.IsAbs() {
+ r.URL = reqURL.String()
+ if len(r.URL) > 0 && r.URL[0] != '/' {
+ r.URL = "/" + r.URL
+ }
+
+ reqURL, err = url.Parse(c.HostURL + r.URL)
+ if err != nil {
+ return err
+ }
+ }
+
+ // GH #407 && #318
+ if reqURL.Scheme == "" && len(c.scheme) > 0 {
+ reqURL.Scheme = c.scheme
+ }
+
+ // Adding Query Param
+ query := make(url.Values)
+ for k, v := range c.QueryParam {
+ for _, iv := range v {
+ query.Add(k, iv)
+ }
+ }
+
+ for k, v := range r.QueryParam {
+ // remove the query param set at the client level by key,
+ // since an override happens for that key in the request
+ query.Del(k)
+
+ for _, iv := range v {
+ query.Add(k, iv)
+ }
+ }
+
+ // GitHub #123 Preserve query string order partially.
+ // Doing so is not feasible in the `SetQuery*` resty methods, because the
+ // standard package `url.Values.Encode(...)` sorts the query params
+ // alphabetically
+ if len(query) > 0 {
+ if IsStringEmpty(reqURL.RawQuery) {
+ reqURL.RawQuery = query.Encode()
+ } else {
+ reqURL.RawQuery = reqURL.RawQuery + "&" + query.Encode()
+ }
+ }
+
+ r.URL = reqURL.String()
+
+ return nil
+}
+
+func parseRequestHeader(c *Client, r *Request) error {
+ hdr := make(http.Header)
+ for k := range c.Header {
+ hdr[k] = append(hdr[k], c.Header[k]...)
+ }
+
+ for k := range r.Header {
+ hdr.Del(k)
+ hdr[k] = append(hdr[k], r.Header[k]...)
+ }
+
+ if IsStringEmpty(hdr.Get(hdrUserAgentKey)) {
+ hdr.Set(hdrUserAgentKey, hdrUserAgentValue)
+ }
+
+ ct := hdr.Get(hdrContentTypeKey)
+ if IsStringEmpty(hdr.Get(hdrAcceptKey)) && !IsStringEmpty(ct) &&
+ (IsJSONType(ct) || IsXMLType(ct)) {
+ hdr.Set(hdrAcceptKey, hdr.Get(hdrContentTypeKey))
+ }
+
+ r.Header = hdr
+
+ return nil
+}
+
+func parseRequestBody(c *Client, r *Request) (err error) {
+ if isPayloadSupported(r.Method, c.AllowGetMethodPayload) {
+ // Handling Multipart
+ if r.isMultiPart && !(r.Method == MethodPatch) {
+ if err = handleMultipart(c, r); err != nil {
+ return
+ }
+
+ goto CL
+ }
+
+ // Handling Form Data
+ if len(c.FormData) > 0 || len(r.FormData) > 0 {
+ handleFormData(c, r)
+
+ goto CL
+ }
+
+ // Handling Request body
+ if r.Body != nil {
+ handleContentType(c, r)
+
+ if err = handleRequestBody(c, r); err != nil {
+ return
+ }
+ }
+ }
+
+CL:
+ // by default resty won't set content length, you can if you want to :)
+ if (c.setContentLength || r.setContentLength) && r.bodyBuf != nil {
+ r.Header.Set(hdrContentLengthKey, fmt.Sprintf("%d", r.bodyBuf.Len()))
+ }
+
+ return
+}
+
+func createHTTPRequest(c *Client, r *Request) (err error) {
+ if r.bodyBuf == nil {
+ if reader, ok := r.Body.(io.Reader); ok {
+ r.RawRequest, err = http.NewRequest(r.Method, r.URL, reader)
+ } else if c.setContentLength || r.setContentLength {
+ r.RawRequest, err = http.NewRequest(r.Method, r.URL, http.NoBody)
+ } else {
+ r.RawRequest, err = http.NewRequest(r.Method, r.URL, nil)
+ }
+ } else {
+ r.RawRequest, err = http.NewRequest(r.Method, r.URL, r.bodyBuf)
+ }
+
+ if err != nil {
+ return
+ }
+
+ // Assign close connection option
+ r.RawRequest.Close = c.closeConnection
+
+ // Add headers into http request
+ r.RawRequest.Header = r.Header
+
+ // Add cookies from client instance into http request
+ for _, cookie := range c.Cookies {
+ r.RawRequest.AddCookie(cookie)
+ }
+
+ // Add cookies from request instance into http request
+ for _, cookie := range r.Cookies {
+ r.RawRequest.AddCookie(cookie)
+ }
+
+ // Enable trace
+ if c.trace || r.trace {
+ r.clientTrace = &clientTrace{}
+ r.ctx = r.clientTrace.createContext(r.Context())
+ }
+
+ // Use context if it was specified
+ if r.ctx != nil {
+ r.RawRequest = r.RawRequest.WithContext(r.ctx)
+ }
+
+ bodyCopy, err := getBodyCopy(r)
+ if err != nil {
+ return err
+ }
+
+ // assign get body func for the underlying raw request instance
+ r.RawRequest.GetBody = func() (io.ReadCloser, error) {
+ if bodyCopy != nil {
+ return ioutil.NopCloser(bytes.NewReader(bodyCopy.Bytes())), nil
+ }
+ return nil, nil
+ }
+
+ return
+}
+
+func addCredentials(c *Client, r *Request) error {
+ var isBasicAuth bool
+ // Basic Auth
+ if r.UserInfo != nil { // takes precedence
+ r.RawRequest.SetBasicAuth(r.UserInfo.Username, r.UserInfo.Password)
+ isBasicAuth = true
+ } else if c.UserInfo != nil {
+ r.RawRequest.SetBasicAuth(c.UserInfo.Username, c.UserInfo.Password)
+ isBasicAuth = true
+ }
+
+ if !c.DisableWarn {
+ if isBasicAuth && !strings.HasPrefix(r.URL, "https") {
+ c.log.Warnf("Using Basic Auth in HTTP mode is not secure, use HTTPS")
+ }
+ }
+
+ // Set the Authorization Header Scheme
+ var authScheme string
+ if !IsStringEmpty(r.AuthScheme) {
+ authScheme = r.AuthScheme
+ } else if !IsStringEmpty(c.AuthScheme) {
+ authScheme = c.AuthScheme
+ } else {
+ authScheme = "Bearer"
+ }
+
+ // Build the Token Auth header
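+ // e.g. with the default scheme the result is "Authorization: Bearer <token-value>"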
+ if !IsStringEmpty(r.Token) { // takes precedence
+ r.RawRequest.Header.Set(c.HeaderAuthorizationKey, authScheme+" "+r.Token)
+ } else if !IsStringEmpty(c.Token) {
+ r.RawRequest.Header.Set(c.HeaderAuthorizationKey, authScheme+" "+c.Token)
+ }
+
+ return nil
+}
+
+func requestLogger(c *Client, r *Request) error {
+ if c.Debug {
+ rr := r.RawRequest
+ rl := &RequestLog{Header: copyHeaders(rr.Header), Body: r.fmtBodyString(c.debugBodySizeLimit)}
+ if c.requestLog != nil {
+ if err := c.requestLog(rl); err != nil {
+ return err
+ }
+ }
+ // fmt.Sprintf("COOKIES:\n%s\n", composeCookies(c.GetClient().Jar, *rr.URL)) +
+
+ reqLog := "\n==============================================================================\n" +
+ "~~~ REQUEST ~~~\n" +
+ fmt.Sprintf("%s %s %s\n", r.Method, rr.URL.RequestURI(), rr.Proto) +
+ fmt.Sprintf("HOST : %s\n", rr.URL.Host) +
+ fmt.Sprintf("HEADERS:\n%s\n", composeHeaders(c, r, rl.Header)) +
+ fmt.Sprintf("BODY :\n%v\n", rl.Body) +
+ "------------------------------------------------------------------------------\n"
+
+ r.initValuesMap()
+ r.values[debugRequestLogKey] = reqLog
+ }
+
+ return nil
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Response Middleware(s)
+//_______________________________________________________________________
+
+func responseLogger(c *Client, res *Response) error {
+ if c.Debug {
+ rl := &ResponseLog{Header: copyHeaders(res.Header()), Body: res.fmtBodyString(c.debugBodySizeLimit)}
+ if c.responseLog != nil {
+ if err := c.responseLog(rl); err != nil {
+ return err
+ }
+ }
+
+ debugLog := res.Request.values[debugRequestLogKey].(string)
+ debugLog += "~~~ RESPONSE ~~~\n" +
+ fmt.Sprintf("STATUS : %s\n", res.Status()) +
+ fmt.Sprintf("PROTO : %s\n", res.RawResponse.Proto) +
+ fmt.Sprintf("RECEIVED AT : %v\n", res.ReceivedAt().Format(time.RFC3339Nano)) +
+ fmt.Sprintf("TIME DURATION: %v\n", res.Time()) +
+ "HEADERS :\n" +
+ composeHeaders(c, res.Request, rl.Header) + "\n"
+ if res.Request.isSaveResponse {
+ debugLog += "BODY :\n***** RESPONSE WRITTEN INTO FILE *****\n"
+ } else {
+ debugLog += fmt.Sprintf("BODY :\n%v\n", rl.Body)
+ }
+ debugLog += "==============================================================================\n"
+
+ c.log.Debugf("%s", debugLog)
+ }
+
+ return nil
+}
+
+func parseResponseBody(c *Client, res *Response) (err error) {
+ if res.StatusCode() == http.StatusNoContent {
+ return
+ }
+ // Handles only JSON or XML content type
+ ct := firstNonEmpty(res.Request.forceContentType, res.Header().Get(hdrContentTypeKey), res.Request.fallbackContentType)
+ if IsJSONType(ct) || IsXMLType(ct) {
+ // HTTP status code > 199 and < 300, considered as Result
+ if res.IsSuccess() {
+ res.Request.Error = nil
+ if res.Request.Result != nil {
+ err = Unmarshalc(c, ct, res.body, res.Request.Result)
+ return
+ }
+ }
+
+ // HTTP status code > 399, considered as Error
+ if res.IsError() {
+ // global error interface
+ if res.Request.Error == nil && c.Error != nil {
+ res.Request.Error = reflect.New(c.Error).Interface()
+ }
+
+ if res.Request.Error != nil {
+ err = Unmarshalc(c, ct, res.body, res.Request.Error)
+ }
+ }
+ }
+
+ return
+}
+
+func handleMultipart(c *Client, r *Request) (err error) {
+ r.bodyBuf = acquireBuffer()
+ w := multipart.NewWriter(r.bodyBuf)
+
+ for k, v := range c.FormData {
+ for _, iv := range v {
+ if err = w.WriteField(k, iv); err != nil {
+ return err
+ }
+ }
+ }
+
+ for k, v := range r.FormData {
+ for _, iv := range v {
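+ // Convention: a key prefixed with "@" denotes a file upload; the value is
+ // the file path and the param name is the key without the "@" prefix.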
+ if strings.HasPrefix(k, "@") { // file
+ err = addFile(w, k[1:], iv)
+ if err != nil {
+ return
+ }
+ } else { // form value
+ if err = w.WriteField(k, iv); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ // #21 - adding io.Reader support
+ if len(r.multipartFiles) > 0 {
+ for _, f := range r.multipartFiles {
+ err = addFileReader(w, f)
+ if err != nil {
+ return
+ }
+ }
+ }
+
+ // GitHub #130 adding multipart field support with content type
+ if len(r.multipartFields) > 0 {
+ for _, mf := range r.multipartFields {
+ if err = addMultipartFormField(w, mf); err != nil {
+ return
+ }
+ }
+ }
+
+ r.Header.Set(hdrContentTypeKey, w.FormDataContentType())
+ err = w.Close()
+
+ return
+}
+
+func handleFormData(c *Client, r *Request) {
+ formData := url.Values{}
+
+ for k, v := range c.FormData {
+ for _, iv := range v {
+ formData.Add(k, iv)
+ }
+ }
+
+ for k, v := range r.FormData {
+ // remove the form data field set at the client level by key,
+ // since an override happens for that key in the request
+ formData.Del(k)
+
+ for _, iv := range v {
+ formData.Add(k, iv)
+ }
+ }
+
+ r.bodyBuf = bytes.NewBuffer([]byte(formData.Encode()))
+ r.Header.Set(hdrContentTypeKey, formContentType)
+ r.isFormData = true
+}
+
+func handleContentType(c *Client, r *Request) {
+ contentType := r.Header.Get(hdrContentTypeKey)
+ if IsStringEmpty(contentType) {
+ contentType = DetectContentType(r.Body)
+ r.Header.Set(hdrContentTypeKey, contentType)
+ }
+}
+
+func handleRequestBody(c *Client, r *Request) (err error) {
+ var bodyBytes []byte
+ contentType := r.Header.Get(hdrContentTypeKey)
+ kind := kindOf(r.Body)
+ r.bodyBuf = nil
+
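+ // Handling precedence below: io.Reader, []byte, string, then content
+ // type based marshalling (JSON for struct/map/slice, XML for struct)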
+ if reader, ok := r.Body.(io.Reader); ok {
+ if c.setContentLength || r.setContentLength { // keep backward compatibility
+ r.bodyBuf = acquireBuffer()
+ _, err = r.bodyBuf.ReadFrom(reader)
+ r.Body = nil
+ } else {
+ // Otherwise, bufferless processing for `io.Reader` bodies.
+ return
+ }
+ } else if b, ok := r.Body.([]byte); ok {
+ bodyBytes = b
+ } else if s, ok := r.Body.(string); ok {
+ bodyBytes = []byte(s)
+ } else if IsJSONType(contentType) &&
+ (kind == reflect.Struct || kind == reflect.Map || kind == reflect.Slice) {
+ r.bodyBuf, err = jsonMarshal(c, r, r.Body)
+ if err != nil {
+ return
+ }
+ } else if IsXMLType(contentType) && (kind == reflect.Struct) {
+ bodyBytes, err = c.XMLMarshal(r.Body)
+ if err != nil {
+ return
+ }
+ }
+
+ if bodyBytes == nil && r.bodyBuf == nil {
+ err = errors.New("unsupported 'Body' type/value")
+ }
+
+ // if any error occurred during body bytes handling, return it
+ if err != nil {
+ return
+ }
+
+ // []byte into Buffer
+ if bodyBytes != nil && r.bodyBuf == nil {
+ r.bodyBuf = acquireBuffer()
+ _, _ = r.bodyBuf.Write(bodyBytes)
+ }
+
+ return
+}
+
+func saveResponseIntoFile(c *Client, res *Response) error {
+ if res.Request.isSaveResponse {
+ file := ""
+
+ if len(c.outputDirectory) > 0 && !filepath.IsAbs(res.Request.outputFile) {
+ file += c.outputDirectory + string(filepath.Separator)
+ }
+
+ file = filepath.Clean(file + res.Request.outputFile)
+ if err := createDirectory(filepath.Dir(file)); err != nil {
+ return err
+ }
+
+ outFile, err := os.Create(file)
+ if err != nil {
+ return err
+ }
+ defer closeq(outFile)
+
+ // io.Copy reads at most 32 KB at a time, so it works well for large file downloads too
+ defer closeq(res.RawResponse.Body)
+
+ written, err := io.Copy(outFile, res.RawResponse.Body)
+ if err != nil {
+ return err
+ }
+
+ res.size = written
+ }
+
+ return nil
+}
+
+func getBodyCopy(r *Request) (*bytes.Buffer, error) {
+ // If r.bodyBuf is present, return a copy of it
+ if r.bodyBuf != nil {
+ return bytes.NewBuffer(r.bodyBuf.Bytes()), nil
+ }
+
+ // Maybe the body is an `io.Reader`.
+ // Note: Resty users have to watch out for large body sizes with `io.Reader`
+ if r.RawRequest.Body != nil {
+ b, err := ioutil.ReadAll(r.RawRequest.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ // Restore the Body
+ closeq(r.RawRequest.Body)
+ r.RawRequest.Body = ioutil.NopCloser(bytes.NewBuffer(b))
+
+ // Return the Body bytes
+ return bytes.NewBuffer(b), nil
+ }
+ return nil, nil
+}
diff --git a/vendor/github.com/go-resty/resty/v2/redirect.go b/vendor/github.com/go-resty/resty/v2/redirect.go
new file mode 100644
index 0000000..7d7e43b
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/redirect.go
@@ -0,0 +1,101 @@
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+ "strings"
+)
+
+type (
+ // RedirectPolicy to regulate the redirects in the resty client.
+ // Objects implementing the RedirectPolicy interface can be registered
+ // via `Client.SetRedirectPolicy`.
+ //
+ // The Apply function should return nil to continue the redirect journey; otherwise
+ // it should return an error to stop the redirect.
+ RedirectPolicy interface {
+ Apply(req *http.Request, via []*http.Request) error
+ }
+
+ // The RedirectPolicyFunc type is an adapter to allow the use of ordinary functions as RedirectPolicy.
+ // If f is a function with the appropriate signature, RedirectPolicyFunc(f) is a RedirectPolicy object that calls f.
+ RedirectPolicyFunc func(*http.Request, []*http.Request) error
+)
+
+// Apply calls f(req, via).
+func (f RedirectPolicyFunc) Apply(req *http.Request, via []*http.Request) error {
+ return f(req, via)
+}
+
+// NoRedirectPolicy is used to disable redirects in the HTTP client
+// resty.SetRedirectPolicy(NoRedirectPolicy())
+func NoRedirectPolicy() RedirectPolicy {
+ return RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
+ return errors.New("auto redirect is disabled")
+ })
+}
+
+// FlexibleRedirectPolicy is a convenient method to create a redirect policy with a maximum number of redirects.
+// resty.SetRedirectPolicy(FlexibleRedirectPolicy(20))
+func FlexibleRedirectPolicy(noOfRedirect int) RedirectPolicy {
+ return RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
+ if len(via) >= noOfRedirect {
+ return fmt.Errorf("stopped after %d redirects", noOfRedirect)
+ }
+ checkHostAndAddHeaders(req, via[0])
+ return nil
+ })
+}
+
+// DomainCheckRedirectPolicy is a convenient method to define a domain-name redirect rule in the resty client.
+// Redirects are allowed only for the hosts mentioned in the policy.
+// resty.SetRedirectPolicy(DomainCheckRedirectPolicy("host1.com", "host2.org", "host3.net"))
+func DomainCheckRedirectPolicy(hostnames ...string) RedirectPolicy {
+ hosts := make(map[string]bool)
+ for _, h := range hostnames {
+ hosts[strings.ToLower(h)] = true
+ }
+
+ fn := RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
+ if ok := hosts[getHostname(req.URL.Host)]; !ok {
+ return errors.New("redirect is not allowed as per DomainCheckRedirectPolicy")
+ }
+
+ return nil
+ })
+
+ return fn
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Package Unexported methods
+//_______________________________________________________________________
+
+func getHostname(host string) (hostname string) {
+ if strings.Index(host, ":") > 0 {
+ host, _, _ = net.SplitHostPort(host)
+ }
+ hostname = strings.ToLower(host)
+ return
+}
+
+// By default Go will not carry request headers over to the redirected request.
+// After going through various discussion comments in the thread
+// https://github.com/golang/go/issues/4800,
+// Resty adds all the headers during a redirect for the same host
+func checkHostAndAddHeaders(cur *http.Request, pre *http.Request) {
+ curHostname := getHostname(cur.URL.Host)
+ preHostname := getHostname(pre.URL.Host)
+ if strings.EqualFold(curHostname, preHostname) {
+ for key, val := range pre.Header {
+ cur.Header[key] = val
+ }
+ } else { // only library User-Agent header is added
+ cur.Header.Set(hdrUserAgentKey, hdrUserAgentValue)
+ }
+}
diff --git a/vendor/github.com/go-resty/resty/v2/request.go b/vendor/github.com/go-resty/resty/v2/request.go
new file mode 100644
index 0000000..672df88
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/request.go
@@ -0,0 +1,896 @@
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "reflect"
+ "strings"
+ "time"
+)
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Request struct and methods
+//_______________________________________________________________________
+
+// Request struct is used to compose and fire individual requests from a
+// resty client. Request provides options to override client-level
+// settings and also options for the request composition.
+type Request struct {
+ URL string
+ Method string
+ Token string
+ AuthScheme string
+ QueryParam url.Values
+ FormData url.Values
+ PathParams map[string]string
+ Header http.Header
+ Time time.Time
+ Body interface{}
+ Result interface{}
+ Error interface{}
+ RawRequest *http.Request
+ SRV *SRVRecord
+ UserInfo *User
+ Cookies []*http.Cookie
+
+ // Attempt is to represent the request attempt made during a Resty
+ // request execution flow, including retry count.
+ //
+ // Since v2.4.0
+ Attempt int
+
+ isMultiPart bool
+ isFormData bool
+ setContentLength bool
+ isSaveResponse bool
+ notParseResponse bool
+ jsonEscapeHTML bool
+ trace bool
+ outputFile string
+ fallbackContentType string
+ forceContentType string
+ ctx context.Context
+ values map[string]interface{}
+ client *Client
+ bodyBuf *bytes.Buffer
+ clientTrace *clientTrace
+ multipartFiles []*File
+ multipartFields []*MultipartField
+ retryConditions []RetryConditionFunc
+}
+
+// Context method returns the Context if it's already set in the request;
+// otherwise it creates a new one using `context.Background()`.
+func (r *Request) Context() context.Context {
+ if r.ctx == nil {
+ return context.Background()
+ }
+ return r.ctx
+}
+
+// SetContext method sets the context.Context for the current Request. It allows
+// the request execution to be interrupted if the ctx.Done() channel is closed.
+// See https://blog.golang.org/context article and the "context" package
+// documentation.
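+//
+// An illustrative sketch using a timeout context (URL and timeout are placeholders):
+// ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+// defer cancel()
+// resp, err := client.R().SetContext(ctx).Get("https://httpbin.org/get")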
+func (r *Request) SetContext(ctx context.Context) *Request {
+ r.ctx = ctx
+ return r
+}
+
+// SetHeader method is to set a single header field and its value in the current request.
+//
+// For Example: To set `Content-Type` and `Accept` as `application/json`.
+// client.R().
+// SetHeader("Content-Type", "application/json").
+// SetHeader("Accept", "application/json")
+//
+// Also, you can override a header value that was set at the client instance level.
+func (r *Request) SetHeader(header, value string) *Request {
+ r.Header.Set(header, value)
+ return r
+}
+
+// SetHeaders method sets multiple header fields and their values at one go in the current request.
+//
+// For Example: To set `Content-Type` and `Accept` as `application/json`
+//
+// client.R().
+// SetHeaders(map[string]string{
+// "Content-Type": "application/json",
+// "Accept": "application/json",
+// })
+// Also, you can override a header value that was set at the client instance level.
+func (r *Request) SetHeaders(headers map[string]string) *Request {
+ for h, v := range headers {
+ r.SetHeader(h, v)
+ }
+ return r
+}
+
+// SetHeaderMultiValues sets multiple header fields and their values, given as lists of strings, at one go in the current request.
+//
+// For Example: To set `Accept` as `text/html, application/xhtml+xml, application/xml;q=0.9, image/webp, */*;q=0.8`
+//
+// client.R().
+// SetHeaderMultiValues(map[string][]string{
+// "Accept": []string{"text/html", "application/xhtml+xml", "application/xml;q=0.9", "image/webp", "*/*;q=0.8"},
+// })
+// Also, you can override a header value that was set at the client instance level.
+func (r *Request) SetHeaderMultiValues(headers map[string][]string) *Request {
+ for key, values := range headers {
+ r.SetHeader(key, strings.Join(values, ", "))
+ }
+ return r
+}
+
+// SetHeaderVerbatim method is to set a single header field and its value verbatim in the current request.
+//
+// For Example: To set `all_lowercase` and `UPPERCASE` as `available`.
+// client.R().
+// SetHeaderVerbatim("all_lowercase", "available").
+// SetHeaderVerbatim("UPPERCASE", "available")
+//
+// Also, you can override a header value that was set at the client instance level.
+//
+// Since v2.6.0
+func (r *Request) SetHeaderVerbatim(header, value string) *Request {
+ r.Header[header] = []string{value}
+ return r
+}
+
+// SetQueryParam method sets a single parameter and its value in the current request.
+// It will be formed as a query string for the request.
+//
+// For Example: `search=kitchen%20papers&size=large` in the URL after `?` mark.
+// client.R().
+// SetQueryParam("search", "kitchen papers").
+// SetQueryParam("size", "large")
+// Also, you can override query param values that were set at the client instance level.
+func (r *Request) SetQueryParam(param, value string) *Request {
+ r.QueryParam.Set(param, value)
+ return r
+}
+
+// SetQueryParams method sets multiple parameters and their values at one go in the current request.
+// It will be formed as a query string for the request.
+//
+// For Example: `search=kitchen%20papers&size=large` in the URL after `?` mark.
+// client.R().
+// SetQueryParams(map[string]string{
+// "search": "kitchen papers",
+// "size": "large",
+// })
+// Also, you can override query param values that were set at the client instance level.
+func (r *Request) SetQueryParams(params map[string]string) *Request {
+ for p, v := range params {
+ r.SetQueryParam(p, v)
+ }
+ return r
+}
+
+// SetQueryParamsFromValues method appends multiple parameters with multi-value
+// (`url.Values`) at one go in the current request. It will be formed as
+// query string for the request.
+//
+// For Example: `status=pending&status=approved&status=open` in the URL after `?` mark.
+// client.R().
+// SetQueryParamsFromValues(url.Values{
+// "status": []string{"pending", "approved", "open"},
+// })
+//
+// You can also override a query parameter value that was set at the client instance level.
+func (r *Request) SetQueryParamsFromValues(params url.Values) *Request {
+ for p, v := range params {
+ for _, pv := range v {
+ r.QueryParam.Add(p, pv)
+ }
+ }
+ return r
+}
+
+// SetQueryString method provides the ability to use a string as an input to set the URL query string for the request.
+//
+// Using a string as an input:
+// client.R().
+// SetQueryString("productId=232&template=fresh-sample&cat=resty&source=google&kw=buy a lot more")
+func (r *Request) SetQueryString(query string) *Request {
+ params, err := url.ParseQuery(strings.TrimSpace(query))
+ if err == nil {
+ for p, v := range params {
+ for _, pv := range v {
+ r.QueryParam.Add(p, pv)
+ }
+ }
+ } else {
+ r.client.log.Errorf("%v", err)
+ }
+ return r
+}
+
+// SetFormData method sets form parameters and their values in the current request.
+// It is applicable only to the HTTP methods `POST` and `PUT`, and the request content type
+// will be set to `application/x-www-form-urlencoded`.
+// client.R().
+// SetFormData(map[string]string{
+// "access_token": "BC594900-518B-4F7E-AC75-BD37F019E08F",
+// "user_id": "3455454545",
+// })
+//
+// You can also override a form data value that was set at the client instance level.
+func (r *Request) SetFormData(data map[string]string) *Request {
+ for k, v := range data {
+ r.FormData.Set(k, v)
+ }
+ return r
+}
+
+// SetFormDataFromValues method appends multiple form parameters with multi-value
+// (`url.Values`) at one go in the current request.
+// client.R().
+// SetFormDataFromValues(url.Values{
+// "search_criteria": []string{"book", "glass", "pencil"},
+// })
+//
+// You can also override a form data value that was set at the client instance level.
+func (r *Request) SetFormDataFromValues(data url.Values) *Request {
+ for k, v := range data {
+ for _, kv := range v {
+ r.FormData.Add(k, kv)
+ }
+ }
+ return r
+}
+
+// SetBody method sets the request body for the request. It supports a variety of practical needs with ease.
+// Supported request body data types are `string`, `[]byte`, `struct`, `map`, `slice` and `io.Reader`.
+// The body value can be a pointer or non-pointer. For JSON and XML content types, a `struct`, `map`,
+// or `slice` body is marshalled automatically.
+//
+// Note: `io.Reader` is processed in bufferless mode while sending the request.
+//
+// For Example: Struct as a body input. Based on the content type, it will be marshalled.
+// client.R().
+// SetBody(User{
+// Username: "jeeva@myjeeva.com",
+// Password: "welcome2resty",
+// })
+//
+// Map as a body input. Based on the content type, it will be marshalled.
+// client.R().
+// SetBody(map[string]interface{}{
+// "username": "jeeva@myjeeva.com",
+// "password": "welcome2resty",
+// "address": &Address{
+// Address1: "1111 This is my street",
+// Address2: "Apt 201",
+// City: "My City",
+// State: "My State",
+// ZipCode: 00000,
+// },
+// })
+//
+// String as a body input. Suitable for any use case needing a string input.
+// client.R().
+// SetBody(`{
+// "username": "jeeva@getrightcare.com",
+// "password": "admin"
+// }`)
+//
+// []byte as a body input. Suitable for raw requests such as file uploads, serialization & deserialization, etc.
+// client.R().
+// SetBody([]byte("This is my raw request, sent as-is"))
+func (r *Request) SetBody(body interface{}) *Request {
+ r.Body = body
+ return r
+}
+
+// SetResult method registers the response `Result` object for automatic unmarshalling of the response,
+// if the response status code is between 200 and 299 and the content type is either JSON or XML.
+//
+// Note: Result object can be pointer or non-pointer.
+// client.R().SetResult(&AuthToken{})
+// // OR
+// client.R().SetResult(AuthToken{})
+//
+// Accessing the result value from the response instance.
+// response.Result().(*AuthToken)
+func (r *Request) SetResult(res interface{}) *Request {
+ r.Result = getPointer(res)
+ return r
+}
+
+// SetError method registers the `Error` object for automatic unmarshalling of the response,
+// if the response status code is greater than 399 and the content type is either JSON or XML.
+//
+// Note: Error object can be pointer or non-pointer.
+// client.R().SetError(&AuthError{})
+// // OR
+// client.R().SetError(AuthError{})
+//
+// Accessing the error value from the response instance.
+// response.Error().(*AuthError)
+func (r *Request) SetError(err interface{}) *Request {
+ r.Error = getPointer(err)
+ return r
+}
+
+// SetFile method is to set a single file field name and its path for multipart upload.
+// client.R().
+// SetFile("my_file", "/Users/jeeva/Gas Bill - Sep.pdf")
+func (r *Request) SetFile(param, filePath string) *Request {
+ r.isMultiPart = true
+ r.FormData.Set("@"+param, filePath)
+ return r
+}
+
+// SetFiles method is to set multiple file field names and their paths for multipart upload.
+// client.R().
+// SetFiles(map[string]string{
+// "my_file1": "/Users/jeeva/Gas Bill - Sep.pdf",
+// "my_file2": "/Users/jeeva/Electricity Bill - Sep.pdf",
+// "my_file3": "/Users/jeeva/Water Bill - Sep.pdf",
+// })
+func (r *Request) SetFiles(files map[string]string) *Request {
+ r.isMultiPart = true
+ for f, fp := range files {
+ r.FormData.Set("@"+f, fp)
+ }
+ return r
+}
+
+// SetFileReader method is to set a single file using an io.Reader for multipart upload.
+// client.R().
+// SetFileReader("profile_img", "my-profile-img.png", bytes.NewReader(profileImgBytes)).
+// SetFileReader("notes", "user-notes.txt", bytes.NewReader(notesBytes))
+func (r *Request) SetFileReader(param, fileName string, reader io.Reader) *Request {
+ r.isMultiPart = true
+ r.multipartFiles = append(r.multipartFiles, &File{
+ Name: fileName,
+ ParamName: param,
+ Reader: reader,
+ })
+ return r
+}
+
+// SetMultipartFormData method allows simple form data to be attached to the request as `multipart/form-data`.
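+//
+// For Example (an illustrative sketch; the field names and values are assumptions):
+// client.R().
+// SetMultipartFormData(map[string]string{
+// "first_name": "Jeevanandam",
+// "last_name": "M",
+// })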
+func (r *Request) SetMultipartFormData(data map[string]string) *Request {
+ for k, v := range data {
+ r = r.SetMultipartField(k, "", "", strings.NewReader(v))
+ }
+
+ return r
+}
+
+// SetMultipartField method is to set custom data using an io.Reader for multipart upload.
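+//
+// For Example (an illustrative sketch; the field values are assumptions):
+// client.R().
+// SetMultipartField("uploadManifest", "upload-file.json", "application/json",
+// strings.NewReader(`{"name": "Uploaded document"}`))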
+func (r *Request) SetMultipartField(param, fileName, contentType string, reader io.Reader) *Request {
+ r.isMultiPart = true
+ r.multipartFields = append(r.multipartFields, &MultipartField{
+ Param: param,
+ FileName: fileName,
+ ContentType: contentType,
+ Reader: reader,
+ })
+ return r
+}
+
+// SetMultipartFields method is to set multiple data fields using io.Reader for multipart upload.
+//
+// For Example:
+// client.R().SetMultipartFields(
+// &resty.MultipartField{
+// Param: "uploadManifest1",
+// FileName: "upload-file-1.json",
+// ContentType: "application/json",
+// Reader: strings.NewReader(`{"input": {"name": "Uploaded document 1", "_filename" : ["file1.txt"]}}`),
+// },
+// &resty.MultipartField{
+// Param: "uploadManifest2",
+// FileName: "upload-file-2.json",
+// ContentType: "application/json",
+// Reader: strings.NewReader(`{"input": {"name": "Uploaded document 2", "_filename" : ["file2.txt"]}}`),
+// })
+//
+// If you already have a slice, then simply call:
+// client.R().SetMultipartFields(fields...)
+func (r *Request) SetMultipartFields(fields ...*MultipartField) *Request {
+ r.isMultiPart = true
+ r.multipartFields = append(r.multipartFields, fields...)
+ return r
+}
+
+// SetContentLength method sets the HTTP header `Content-Length` value for the current request.
+// By default, Resty won't set `Content-Length`. You also have the option to enable it for every
+// request.
+//
+// See `Client.SetContentLength`
+// client.R().SetContentLength(true)
+func (r *Request) SetContentLength(l bool) *Request {
+ r.setContentLength = l
+ return r
+}
+
+// SetBasicAuth method sets the basic authentication header in the current HTTP request.
+//
+// For Example:
+// Authorization: Basic <base64-encoded-value>
+//
+// To set the header for username "go-resty" and password "welcome"
+// client.R().SetBasicAuth("go-resty", "welcome")
+//
+// This method overrides the credentials set by method `Client.SetBasicAuth`.
+func (r *Request) SetBasicAuth(username, password string) *Request {
+ r.UserInfo = &User{Username: username, Password: password}
+ return r
+}
+
+// SetAuthToken method sets the auth token header (default scheme: Bearer) in the current HTTP request. Header example:
+// Authorization: Bearer <auth-token-value>
+//
+// For Example: To set auth token BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F
+//
+// client.R().SetAuthToken("BC594900518B4F7EAC75BD37F019E08FBC594900518B4F7EAC75BD37F019E08F")
+//
+// This method overrides the Auth token set by method `Client.SetAuthToken`.
+func (r *Request) SetAuthToken(token string) *Request {
+ r.Token = token
+ return r
+}
+
+// SetAuthScheme method sets the auth scheme type in the HTTP request. Header example:
+// Authorization: <auth-scheme-value> <auth-token-value>
+//
+// For Example: To set the scheme to use OAuth
+//
+// client.R().SetAuthScheme("OAuth")
+//
+// This auth header scheme is applied to the current request. It can also be set once at the
+// client instance level and overridden at the request level.
+//
+// Information about Auth schemes can be found in RFC7235 which is linked to below along with the page containing
+// the currently defined official authentication schemes:
+// https://tools.ietf.org/html/rfc7235
+// https://www.iana.org/assignments/http-authschemes/http-authschemes.xhtml#authschemes
+//
+// This method overrides the Authorization scheme set by method `Client.SetAuthScheme`.
+func (r *Request) SetAuthScheme(scheme string) *Request {
+ r.AuthScheme = scheme
+ return r
+}
+
+// SetOutput method sets the output file for the current HTTP request. The current HTTP response will be
+// saved in the given file. It is similar to the `curl -o` flag. An absolute or relative path can be used.
+// If it is a relative path, then the output file goes under the output directory, as set
+// by `Client.SetOutputDirectory`.
+// client.R().
+// SetOutput("/Users/jeeva/Downloads/ReplyWithHeader-v5.1-beta.zip").
+// Get("http://bit.ly/1LouEKr")
+//
+// Note: In this scenario `Response.Body` might be nil.
+func (r *Request) SetOutput(file string) *Request {
+ r.outputFile = file
+ r.isSaveResponse = true
+ return r
+}
+
+// SetSRV method sets the details to query the service SRV record and execute the
+// request.
+// client.R().
+// SetSRV(SRVRecord{"web", "testservice.com"}).
+// Get("/get")
+func (r *Request) SetSRV(srv *SRVRecord) *Request {
+ r.SRV = srv
+ return r
+}
+
+// SetDoNotParseResponse method instructs `Resty` not to parse the response body automatically.
+// Resty exposes the raw response body as an `io.ReadCloser`. Do not forget to close the body;
+// otherwise you might run into connection leaks and prevent connection reuse.
+//
+// Note: Response middlewares are not applicable if you use this option, since you have
+// taken over control of response parsing from `Resty`.
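+//
+// For Example (an illustrative sketch):
+// resp, err := client.R().
+// SetDoNotParseResponse(true).
+// Get("https://httpbin.org/get")
+// // read from resp.RawBody() and close it when done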
+func (r *Request) SetDoNotParseResponse(parse bool) *Request {
+ r.notParseResponse = parse
+ return r
+}
+
+// SetPathParam method sets a single URL path key-value pair in the
+// current Resty request instance.
+// client.R().SetPathParam("userId", "sample@sample.com")
+//
+// Result:
+// URL - /v1/users/{userId}/details
+// Composed URL - /v1/users/sample@sample.com/details
+// It replaces the value of the key while composing the request URL. You can also
+// override a path parameter value that was set at the client instance level.
+func (r *Request) SetPathParam(param, value string) *Request {
+ r.PathParams[param] = value
+ return r
+}
+
+// SetPathParams method sets multiple URL path key-value pairs at one go in the
+// current Resty request instance.
+// client.R().SetPathParams(map[string]string{
+// "userId": "sample@sample.com",
+// "subAccountId": "100002",
+// })
+//
+// Result:
+// URL - /v1/users/{userId}/{subAccountId}/details
+// Composed URL - /v1/users/sample@sample.com/100002/details
+// It replaces the value of the key while composing the request URL. You can also
+// override a path parameter value that was set at the client instance level.
+func (r *Request) SetPathParams(params map[string]string) *Request {
+ for p, v := range params {
+ r.SetPathParam(p, v)
+ }
+ return r
+}
+
+// ExpectContentType method allows providing a fallback `Content-Type` for automatic unmarshalling
+// when the `Content-Type` response header is unavailable.
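+//
+// For Example (an illustrative sketch; `SearchResult` is a hypothetical type):
+// client.R().
+// SetResult(&SearchResult{}).
+// ExpectContentType("application/json")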
+func (r *Request) ExpectContentType(contentType string) *Request {
+ r.fallbackContentType = contentType
+ return r
+}
+
+// ForceContentType method forces the response `Content-Type` to use for automatic unmarshalling.
+// Resty gives this a higher priority than the `Content-Type` response header. This means that if both
+// `Request.ForceContentType` is set and the response `Content-Type` is available, `ForceContentType` will win.
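+//
+// For Example (an illustrative sketch; `SearchResult` is a hypothetical type):
+// client.R().
+// SetResult(&SearchResult{}).
+// ForceContentType("application/json")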
+func (r *Request) ForceContentType(contentType string) *Request {
+ r.forceContentType = contentType
+ return r
+}
+
+// SetJSONEscapeHTML method is to enable/disable HTML escaping on JSON marshalling.
+//
+// Note: This option is only applicable to the standard JSON marshaller.
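+//
+// For Example (an illustrative sketch):
+// client.R().SetJSONEscapeHTML(false)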
+func (r *Request) SetJSONEscapeHTML(b bool) *Request {
+ r.jsonEscapeHTML = b
+ return r
+}
+
+// SetCookie method appends a single cookie in the current request instance.
+// client.R().SetCookie(&http.Cookie{
+// Name:"go-resty",
+// Value:"This is cookie value",
+// })
+//
+// Note: The method appends the cookie to the existing cookies, if any.
+//
+// Since v2.1.0
+func (r *Request) SetCookie(hc *http.Cookie) *Request {
+ r.Cookies = append(r.Cookies, hc)
+ return r
+}
+
+// SetCookies method sets an array of cookies in the current request instance.
+// cookies := []*http.Cookie{
+// &http.Cookie{
+// Name:"go-resty-1",
+// Value:"This is cookie 1 value",
+// },
+// &http.Cookie{
+// Name:"go-resty-2",
+// Value:"This is cookie 2 value",
+// },
+// }
+//
+// // Setting cookies into Resty's current request
+// client.R().SetCookies(cookies)
+//
+// Note: The method appends the cookies to the existing cookies, if any.
+//
+// Since v2.1.0
+func (r *Request) SetCookies(rs []*http.Cookie) *Request {
+ r.Cookies = append(r.Cookies, rs...)
+ return r
+}
+
+// AddRetryCondition method adds a retry condition function to the request's
+// array of functions that are checked to determine if the request should be retried.
+// The request will be retried if any of the functions returns true and the error is nil.
+//
+// Note: These retry conditions are checked before all retry conditions of the client.
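+//
+// For Example (an illustrative sketch):
+// client.R().
+// AddRetryCondition(func(r *Response, err error) bool {
+// return err != nil || r.StatusCode() == http.StatusTooManyRequests
+// })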
+//
+// Since v2.7.0
+func (r *Request) AddRetryCondition(condition RetryConditionFunc) *Request {
+ r.retryConditions = append(r.retryConditions, condition)
+ return r
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// HTTP request tracing
+//_______________________________________________________________________
+
+// EnableTrace method enables trace for the current request
+// using `httptrace.ClientTrace` and provides insights.
+//
+// client := resty.New()
+//
+// resp, err := client.R().EnableTrace().Get("https://httpbin.org/get")
+// fmt.Println("Error:", err)
+// fmt.Println("Trace Info:", resp.Request.TraceInfo())
+//
+// See `Client.EnableTrace` available too to get trace info for all requests.
+//
+// Since v2.0.0
+func (r *Request) EnableTrace() *Request {
+ r.trace = true
+ return r
+}
+
+// TraceInfo method returns the trace info for the request.
+// If either the Client or Request EnableTrace function has not been called
+// prior to the request being made, an empty TraceInfo object will be returned.
+//
+// Since v2.0.0
+func (r *Request) TraceInfo() TraceInfo {
+ ct := r.clientTrace
+
+ if ct == nil {
+ return TraceInfo{}
+ }
+
+ ti := TraceInfo{
+ DNSLookup: ct.dnsDone.Sub(ct.dnsStart),
+ TLSHandshake: ct.tlsHandshakeDone.Sub(ct.tlsHandshakeStart),
+ ServerTime: ct.gotFirstResponseByte.Sub(ct.gotConn),
+ IsConnReused: ct.gotConnInfo.Reused,
+ IsConnWasIdle: ct.gotConnInfo.WasIdle,
+ ConnIdleTime: ct.gotConnInfo.IdleTime,
+ RequestAttempt: r.Attempt,
+ }
+
+ // Calculate the total time accordingly,
+ // when connection is reused
+ if ct.gotConnInfo.Reused {
+ ti.TotalTime = ct.endTime.Sub(ct.getConn)
+ } else {
+ ti.TotalTime = ct.endTime.Sub(ct.dnsStart)
+ }
+
+ // Only calculate on successful connections
+ if !ct.connectDone.IsZero() {
+ ti.TCPConnTime = ct.connectDone.Sub(ct.dnsDone)
+ }
+
+ // Only calculate on successful connections
+ if !ct.gotConn.IsZero() {
+ ti.ConnTime = ct.gotConn.Sub(ct.getConn)
+ }
+
+ // Only calculate on successful connections
+ if !ct.gotFirstResponseByte.IsZero() {
+ ti.ResponseTime = ct.endTime.Sub(ct.gotFirstResponseByte)
+ }
+
+ // Capture remote address info when connection is non-nil
+ if ct.gotConnInfo.Conn != nil {
+ ti.RemoteAddr = ct.gotConnInfo.Conn.RemoteAddr()
+ }
+
+ return ti
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// HTTP verb method starts here
+//_______________________________________________________________________
+
+// Get method does GET HTTP request. It's defined in section 4.3.1 of RFC7231.
+func (r *Request) Get(url string) (*Response, error) {
+ return r.Execute(MethodGet, url)
+}
+
+// Head method does HEAD HTTP request. It's defined in section 4.3.2 of RFC7231.
+func (r *Request) Head(url string) (*Response, error) {
+ return r.Execute(MethodHead, url)
+}
+
+// Post method does POST HTTP request. It's defined in section 4.3.3 of RFC7231.
+func (r *Request) Post(url string) (*Response, error) {
+ return r.Execute(MethodPost, url)
+}
+
+// Put method does PUT HTTP request. It's defined in section 4.3.4 of RFC7231.
+func (r *Request) Put(url string) (*Response, error) {
+ return r.Execute(MethodPut, url)
+}
+
+// Delete method does DELETE HTTP request. It's defined in section 4.3.5 of RFC7231.
+func (r *Request) Delete(url string) (*Response, error) {
+ return r.Execute(MethodDelete, url)
+}
+
+// Options method does OPTIONS HTTP request. It's defined in section 4.3.7 of RFC7231.
+func (r *Request) Options(url string) (*Response, error) {
+ return r.Execute(MethodOptions, url)
+}
+
+// Patch method does PATCH HTTP request. It's defined in section 2 of RFC5789.
+func (r *Request) Patch(url string) (*Response, error) {
+ return r.Execute(MethodPatch, url)
+}
+
+// Send method performs the HTTP request using the method and URL already defined
+// for the current `Request`.
+// req := client.R()
+// req.Method = resty.MethodGet
+// req.URL = "http://httpbin.org/get"
+// resp, err := req.Send()
+func (r *Request) Send() (*Response, error) {
+ return r.Execute(r.Method, r.URL)
+}
+
+// Execute method performs the HTTP request with the given HTTP method and URL
+// for the current `Request`.
+// resp, err := client.R().Execute(resty.MethodGet, "http://httpbin.org/get")
+func (r *Request) Execute(method, url string) (*Response, error) {
+ var addrs []*net.SRV
+ var resp *Response
+ var err error
+
+ if r.isMultiPart && !(method == MethodPost || method == MethodPut || method == MethodPatch) {
+ // No OnError hook here since this is a request validation error
+ return nil, fmt.Errorf("multipart content is not allowed in HTTP verb [%v]", method)
+ }
+
+ if r.SRV != nil {
+ _, addrs, err = net.LookupSRV(r.SRV.Service, "tcp", r.SRV.Domain)
+ if err != nil {
+ r.client.onErrorHooks(r, nil, err)
+ return nil, err
+ }
+ }
+
+ r.Method = method
+ r.URL = r.selectAddr(addrs, url, 0)
+
+ if r.client.RetryCount == 0 {
+ r.Attempt = 1
+ resp, err = r.client.execute(r)
+ r.client.onErrorHooks(r, resp, unwrapNoRetryErr(err))
+ return resp, unwrapNoRetryErr(err)
+ }
+
+ err = Backoff(
+ func() (*Response, error) {
+ r.Attempt++
+
+ r.URL = r.selectAddr(addrs, url, r.Attempt)
+
+ resp, err = r.client.execute(r)
+ if err != nil {
+ r.client.log.Errorf("%v, Attempt %v", err, r.Attempt)
+ }
+
+ return resp, err
+ },
+ Retries(r.client.RetryCount),
+ WaitTime(r.client.RetryWaitTime),
+ MaxWaitTime(r.client.RetryMaxWaitTime),
+ RetryConditions(append(r.retryConditions, r.client.RetryConditions...)),
+ RetryHooks(r.client.RetryHooks),
+ )
+
+ r.client.onErrorHooks(r, resp, unwrapNoRetryErr(err))
+
+ return resp, unwrapNoRetryErr(err)
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// SRVRecord struct
+//_______________________________________________________________________
+
+// SRVRecord struct holds the data to query the SRV record for the
+// given service.
+type SRVRecord struct {
+ Service string
+ Domain string
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Request Unexported methods
+//_______________________________________________________________________
+
+func (r *Request) fmtBodyString(sl int64) (body string) {
+ body = "***** NO CONTENT *****"
+ if !isPayloadSupported(r.Method, r.client.AllowGetMethodPayload) {
+ return
+ }
+
+ if _, ok := r.Body.(io.Reader); ok {
+ body = "***** BODY IS io.Reader *****"
+ return
+ }
+
+ // multipart or form-data
+ if r.isMultiPart || r.isFormData {
+ bodySize := int64(r.bodyBuf.Len())
+ if bodySize > sl {
+ body = fmt.Sprintf("***** REQUEST TOO LARGE (size - %d) *****", bodySize)
+ return
+ }
+ body = r.bodyBuf.String()
+ return
+ }
+
+ // request body data
+ if r.Body == nil {
+ return
+ }
+ var prtBodyBytes []byte
+ var err error
+
+ contentType := r.Header.Get(hdrContentTypeKey)
+ kind := kindOf(r.Body)
+ if canJSONMarshal(contentType, kind) {
+ prtBodyBytes, err = json.MarshalIndent(&r.Body, "", " ")
+ } else if IsXMLType(contentType) && (kind == reflect.Struct) {
+ prtBodyBytes, err = xml.MarshalIndent(&r.Body, "", " ")
+ } else if b, ok := r.Body.(string); ok {
+ if IsJSONType(contentType) {
+ bodyBytes := []byte(b)
+ out := acquireBuffer()
+ defer releaseBuffer(out)
+ if err = json.Indent(out, bodyBytes, "", " "); err == nil {
+ prtBodyBytes = out.Bytes()
+ }
+ } else {
+ body = b
+ }
+ } else if b, ok := r.Body.([]byte); ok {
+ body = fmt.Sprintf("***** BODY IS byte(s) (size - %d) *****", len(b))
+ return
+ }
+
+ if prtBodyBytes != nil && err == nil {
+ body = string(prtBodyBytes)
+ }
+
+ if len(body) > 0 {
+ bodySize := int64(len([]byte(body)))
+ if bodySize > sl {
+ body = fmt.Sprintf("***** REQUEST TOO LARGE (size - %d) *****", bodySize)
+ }
+ }
+
+ return
+}
+
+func (r *Request) selectAddr(addrs []*net.SRV, path string, attempt int) string {
+ if addrs == nil {
+ return path
+ }
+
+ idx := attempt % len(addrs)
+ domain := strings.TrimRight(addrs[idx].Target, ".")
+ path = strings.TrimLeft(path, "/")
+
+ return fmt.Sprintf("%s://%s:%d/%s", r.client.scheme, domain, addrs[idx].Port, path)
+}
+
+func (r *Request) initValuesMap() {
+ if r.values == nil {
+ r.values = make(map[string]interface{})
+ }
+}
+
+var noescapeJSONMarshal = func(v interface{}) (*bytes.Buffer, error) {
+ buf := acquireBuffer()
+ encoder := json.NewEncoder(buf)
+ encoder.SetEscapeHTML(false)
+ if err := encoder.Encode(v); err != nil {
+ releaseBuffer(buf)
+ return nil, err
+ }
+
+ return buf, nil
+}
diff --git a/vendor/github.com/go-resty/resty/v2/response.go b/vendor/github.com/go-resty/resty/v2/response.go
new file mode 100644
index 0000000..8ae0e10
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/response.go
@@ -0,0 +1,175 @@
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+ "time"
+)
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Response struct and methods
+//_______________________________________________________________________
+
+// Response struct holds the response values of the executed request.
+type Response struct {
+ Request *Request
+ RawResponse *http.Response
+
+ body []byte
+ size int64
+ receivedAt time.Time
+}
+
+// Body method returns the HTTP response body as a []byte for the executed request.
+//
+// Note: `Response.Body` might be nil, if `Request.SetOutput` is used.
+func (r *Response) Body() []byte {
+ if r.RawResponse == nil {
+ return []byte{}
+ }
+ return r.body
+}
+
+// Status method returns the HTTP status string for the executed request.
+// Example: 200 OK
+func (r *Response) Status() string {
+ if r.RawResponse == nil {
+ return ""
+ }
+ return r.RawResponse.Status
+}
+
+// StatusCode method returns the HTTP status code for the executed request.
+// Example: 200
+func (r *Response) StatusCode() int {
+ if r.RawResponse == nil {
+ return 0
+ }
+ return r.RawResponse.StatusCode
+}
+
+// Proto method returns the HTTP response protocol used for the request.
+func (r *Response) Proto() string {
+ if r.RawResponse == nil {
+ return ""
+ }
+ return r.RawResponse.Proto
+}
+
+// Result method returns the response value as an object, if it has one.
+func (r *Response) Result() interface{} {
+ return r.Request.Result
+}
+
+// Error method returns the error object, if it has one.
+func (r *Response) Error() interface{} {
+ return r.Request.Error
+}
+
+// Header method returns the response headers
+func (r *Response) Header() http.Header {
+ if r.RawResponse == nil {
+ return http.Header{}
+ }
+ return r.RawResponse.Header
+}
+
+// Cookies method returns all the response cookies.
+func (r *Response) Cookies() []*http.Cookie {
+ if r.RawResponse == nil {
+ return make([]*http.Cookie, 0)
+ }
+ return r.RawResponse.Cookies()
+}
+
+// String method returns the body of the server response as a string.
+func (r *Response) String() string {
+ if r.body == nil {
+ return ""
+ }
+ return strings.TrimSpace(string(r.body))
+}
+
+// Time method returns the duration between when the request was sent and when the response was received.
+//
+// See `Response.ReceivedAt` to know when the client received the response and see `Response.Request.Time`
+// to know when the client sent the request.
+func (r *Response) Time() time.Duration {
+ if r.Request.clientTrace != nil {
+ return r.Request.TraceInfo().TotalTime
+ }
+ return r.receivedAt.Sub(r.Request.Time)
+}
+
+// ReceivedAt method returns the time at which the response was received from the server.
+func (r *Response) ReceivedAt() time.Time {
+ return r.receivedAt
+}
+
+// Size method returns the HTTP response size in bytes. You could rely on the HTTP `Content-Length`
+// header instead, but it is unreliable for chunked transfer or compressed responses, whereas Resty
+// calculates the response size at the client end, so you get the actual size of the HTTP response.
+func (r *Response) Size() int64 {
+ return r.size
+}
+
+// RawBody method exposes the raw HTTP response body. Use this method in conjunction with the
+// `SetDoNotParseResponse` option, otherwise you get an error such as `read err: http: read on closed response body`.
+//
+// Do not forget to close the body; otherwise you might run into connection leaks and prevent connection
+// reuse, since you have taken over control of response parsing from `Resty`.
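+//
+// For Example (an illustrative sketch):
+// resp, err := client.R().SetDoNotParseResponse(true).Get("https://httpbin.org/get")
+// if err == nil {
+// defer resp.RawBody().Close()
+// // consume resp.RawBody() here
+// }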
+func (r *Response) RawBody() io.ReadCloser {
+ if r.RawResponse == nil {
+ return nil
+ }
+ return r.RawResponse.Body
+}
+
+// IsSuccess method returns true if HTTP status `code >= 200 and <= 299` otherwise false.
+func (r *Response) IsSuccess() bool {
+ return r.StatusCode() > 199 && r.StatusCode() < 300
+}
+
+// IsError method returns true if HTTP status `code >= 400` otherwise false.
+func (r *Response) IsError() bool {
+ return r.StatusCode() > 399
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Response Unexported methods
+//_______________________________________________________________________
+
+func (r *Response) setReceivedAt() {
+ r.receivedAt = time.Now()
+ if r.Request.clientTrace != nil {
+ r.Request.clientTrace.endTime = r.receivedAt
+ }
+}
+
+func (r *Response) fmtBodyString(sl int64) string {
+ if r.body != nil {
+ if int64(len(r.body)) > sl {
+ return fmt.Sprintf("***** RESPONSE TOO LARGE (size - %d) *****", len(r.body))
+ }
+ ct := r.Header().Get(hdrContentTypeKey)
+ if IsJSONType(ct) {
+ out := acquireBuffer()
+ defer releaseBuffer(out)
+ err := json.Indent(out, r.body, "", " ")
+ if err != nil {
+ return fmt.Sprintf("*** Error: Unable to format response body - \"%s\" ***\n\nLog Body as-is:\n%s", err, r.String())
+ }
+ return out.String()
+ }
+ return r.String()
+ }
+
+ return "***** NO CONTENT *****"
+}
diff --git a/vendor/github.com/go-resty/resty/v2/resty.go b/vendor/github.com/go-resty/resty/v2/resty.go
new file mode 100644
index 0000000..6f9c8b4
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/resty.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+// Package resty provides a simple HTTP and REST client library for Go.
+package resty
+
+import (
+ "net"
+ "net/http"
+ "net/http/cookiejar"
+
+ "golang.org/x/net/publicsuffix"
+)
+
+// Version # of resty
+const Version = "2.7.0"
+
+// New method creates a new Resty client.
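+//
+// For Example (an illustrative sketch):
+// client := resty.New()
+// resp, err := client.R().Get("https://httpbin.org/get")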
+func New() *Client {
+ cookieJar, _ := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
+ return createClient(&http.Client{
+ Jar: cookieJar,
+ })
+}
+
+// NewWithClient method creates a new Resty client with given `http.Client`.
+func NewWithClient(hc *http.Client) *Client {
+ return createClient(hc)
+}
+
+// NewWithLocalAddr method creates a new Resty client with the given local address
+// to dial from.
+func NewWithLocalAddr(localAddr net.Addr) *Client {
+ cookieJar, _ := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
+ return createClient(&http.Client{
+ Jar: cookieJar,
+ Transport: createTransport(localAddr),
+ })
+}
diff --git a/vendor/github.com/go-resty/resty/v2/retry.go b/vendor/github.com/go-resty/resty/v2/retry.go
new file mode 100644
index 0000000..00b8514
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/retry.go
@@ -0,0 +1,221 @@
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "context"
+ "math"
+ "math/rand"
+ "sync"
+ "time"
+)
+
+const (
+ defaultMaxRetries = 3
+ defaultWaitTime = time.Duration(100) * time.Millisecond
+ defaultMaxWaitTime = time.Duration(2000) * time.Millisecond
+)
+
+type (
+ // Option is to create convenient retry options like wait time, max retries, etc.
+ Option func(*Options)
+
+ // RetryConditionFunc type is for a retry condition function;
+ // input: a non-nil Response OR a request execution error
+ RetryConditionFunc func(*Response, error) bool
+
+ // OnRetryFunc is for side-effecting functions triggered on retry
+ OnRetryFunc func(*Response, error)
+
+ // RetryAfterFunc returns the time to wait before a retry.
+ // For example, it can parse the HTTP Retry-After header:
+ // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+ // A non-nil error is returned if the request is found to be not retryable.
+ // (0, nil) is a special result that means 'use the default algorithm'.
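+ //
+ // For Example (an illustrative sketch; registered via `Client.SetRetryAfter`):
+ // client.SetRetryAfter(func(c *Client, r *Response) (time.Duration, error) {
+ // return 2 * time.Second, nil
+ // })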
+ RetryAfterFunc func(*Client, *Response) (time.Duration, error)
+
+ // Options struct is used to hold retry settings.
+ Options struct {
+ maxRetries int
+ waitTime time.Duration
+ maxWaitTime time.Duration
+ retryConditions []RetryConditionFunc
+ retryHooks []OnRetryFunc
+ }
+)
+
+// Retries sets the max number of retries
+func Retries(value int) Option {
+ return func(o *Options) {
+ o.maxRetries = value
+ }
+}
+
+// WaitTime sets the default wait time to sleep between requests
+func WaitTime(value time.Duration) Option {
+ return func(o *Options) {
+ o.waitTime = value
+ }
+}
+
+// MaxWaitTime sets the max wait time to sleep between requests
+func MaxWaitTime(value time.Duration) Option {
+ return func(o *Options) {
+ o.maxWaitTime = value
+ }
+}
+
+// RetryConditions sets the conditions that will be checked for retry.
+func RetryConditions(conditions []RetryConditionFunc) Option {
+ return func(o *Options) {
+ o.retryConditions = conditions
+ }
+}
+
+// RetryHooks sets the hooks that will be executed after each retry
+func RetryHooks(hooks []OnRetryFunc) Option {
+ return func(o *Options) {
+ o.retryHooks = hooks
+ }
+}
+
+// Backoff retries the given operation with an increasing timeout duration for up to N retries
+// (the default is 3 attempts; override it with the option Retries(n)).
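+//
+// For Example (an illustrative sketch; `client` is assumed to be a *Client):
+// err := Backoff(
+// func() (*Response, error) { return client.R().Get("https://httpbin.org/get") },
+// Retries(5),
+// WaitTime(200*time.Millisecond),
+// )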
+func Backoff(operation func() (*Response, error), options ...Option) error {
+ // Defaults
+ opts := Options{
+ maxRetries: defaultMaxRetries,
+ waitTime: defaultWaitTime,
+ maxWaitTime: defaultMaxWaitTime,
+ retryConditions: []RetryConditionFunc{},
+ }
+
+ for _, o := range options {
+ o(&opts)
+ }
+
+ var (
+ resp *Response
+ err error
+ )
+
+ for attempt := 0; attempt <= opts.maxRetries; attempt++ {
+ resp, err = operation()
+ ctx := context.Background()
+ if resp != nil && resp.Request.ctx != nil {
+ ctx = resp.Request.ctx
+ }
+ if ctx.Err() != nil {
+ return err
+ }
+
+ err1 := unwrapNoRetryErr(err) // raw error, used when invoking the user's callbacks
+ needsRetry := err != nil && err == err1 // retry on a few operation errors by default
+
+ for _, condition := range opts.retryConditions {
+ needsRetry = condition(resp, err1)
+ if needsRetry {
+ break
+ }
+ }
+
+ if !needsRetry {
+ return err
+ }
+
+ for _, hook := range opts.retryHooks {
+ hook(resp, err)
+ }
+
+ // No need to wait when no retries are left.
+ // Still run retry hooks even on last retry to keep compatibility.
+ if attempt == opts.maxRetries {
+ return err
+ }
+
+ waitTime, err2 := sleepDuration(resp, opts.waitTime, opts.maxWaitTime, attempt)
+ if err2 != nil {
+ if err == nil {
+ err = err2
+ }
+ return err
+ }
+
+ select {
+ case <-time.After(waitTime):
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+
+ return err
+}
+
+func sleepDuration(resp *Response, min, max time.Duration, attempt int) (time.Duration, error) {
+ const maxInt = 1<<31 - 1 // max int for arch 386
+ if max < 0 {
+ max = maxInt
+ }
+ if resp == nil {
+ return jitterBackoff(min, max, attempt), nil
+ }
+
+ retryAfterFunc := resp.Request.client.RetryAfter
+
+ // Check for custom callback
+ if retryAfterFunc == nil {
+ return jitterBackoff(min, max, attempt), nil
+ }
+
+ result, err := retryAfterFunc(resp.Request.client, resp)
+ if err != nil {
+ return 0, err // i.e. 'API quota exceeded'
+ }
+ if result == 0 {
+ return jitterBackoff(min, max, attempt), nil
+ }
+ if result < 0 || max < result {
+ result = max
+ }
+ if result < min {
+ result = min
+ }
+ return result, nil
+}
+
+// Return capped exponential backoff with jitter
+// http://www.awsarchitectureblog.com/2015/03/backoff.html
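+//
+// For example (an illustrative walk-through): with min=100ms, max=2s and attempt=3,
+// temp = min(2s, 100ms*2^3) = 800ms, so the result is a random duration in roughly
+// [400ms, 800ms), floored at min.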
+func jitterBackoff(min, max time.Duration, attempt int) time.Duration {
+ base := float64(min)
+ capLevel := float64(max)
+
+ temp := math.Min(capLevel, base*math.Exp2(float64(attempt)))
+ ri := time.Duration(temp / 2)
+ result := randDuration(ri)
+
+ if result < min {
+ result = min
+ }
+
+ return result
+}
+
+var rnd = newRnd()
+var rndMu sync.Mutex
+
+func randDuration(center time.Duration) time.Duration {
+ rndMu.Lock()
+ defer rndMu.Unlock()
+
+ var ri = int64(center)
+ var jitter = rnd.Int63n(ri)
+ return time.Duration(math.Abs(float64(ri + jitter)))
+}
+
+func newRnd() *rand.Rand {
+ var seed = time.Now().UnixNano()
+ var src = rand.NewSource(seed)
+ return rand.New(src)
+}
diff --git a/vendor/github.com/go-resty/resty/v2/trace.go b/vendor/github.com/go-resty/resty/v2/trace.go
new file mode 100644
index 0000000..23cf703
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/trace.go
@@ -0,0 +1,130 @@
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "context"
+ "crypto/tls"
+ "net"
+ "net/http/httptrace"
+ "time"
+)
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// TraceInfo struct
+//_______________________________________________________________________
+
+// TraceInfo struct is used to provide request trace info such as DNS lookup
+// duration, connection obtain duration, server processing duration, etc.
+//
+// Since v2.0.0
+type TraceInfo struct {
+ // DNSLookup is the duration the transport took to perform
+ // the DNS lookup.
+ DNSLookup time.Duration
+
+ // ConnTime is the duration it took to obtain a successful connection.
+ ConnTime time.Duration
+
+ // TCPConnTime is the duration it took to obtain the TCP connection.
+ TCPConnTime time.Duration
+
+ // TLSHandshake is the duration of the TLS handshake.
+ TLSHandshake time.Duration
+
+ // ServerTime is the duration the server took to respond with the first byte.
+ ServerTime time.Duration
+
+ // ResponseTime is the duration from the first response byte received from the server to
+ // request completion.
+ ResponseTime time.Duration
+
+ // TotalTime is the total end-to-end duration of the request.
+ TotalTime time.Duration
+
+ // IsConnReused is whether this connection has been previously
+ // used for another HTTP request.
+ IsConnReused bool
+
+ // IsConnWasIdle is whether this connection was obtained from an
+ // idle pool.
+ IsConnWasIdle bool
+
+ // ConnIdleTime is the duration the connection was previously
+ // idle, if IsConnWasIdle is true.
+ ConnIdleTime time.Duration
+
+ // RequestAttempt is to represent the request attempt made during a Resty
+ // request execution flow, including retry count.
+ RequestAttempt int
+
+ // RemoteAddr returns the remote network address.
+ RemoteAddr net.Addr
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// ClientTrace struct and its methods
+//_______________________________________________________________________
+
+// clientTrace struct maps the `httptrace.ClientTrace` hooks into fields
+// with the same naming for easy understanding, plus additional insights
+// into the request.
+type clientTrace struct {
+ getConn time.Time
+ dnsStart time.Time
+ dnsDone time.Time
+ connectDone time.Time
+ tlsHandshakeStart time.Time
+ tlsHandshakeDone time.Time
+ gotConn time.Time
+ gotFirstResponseByte time.Time
+ endTime time.Time
+ gotConnInfo httptrace.GotConnInfo
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Trace unexported methods
+//_______________________________________________________________________
+
+func (t *clientTrace) createContext(ctx context.Context) context.Context {
+ return httptrace.WithClientTrace(
+ ctx,
+ &httptrace.ClientTrace{
+ DNSStart: func(_ httptrace.DNSStartInfo) {
+ t.dnsStart = time.Now()
+ },
+ DNSDone: func(_ httptrace.DNSDoneInfo) {
+ t.dnsDone = time.Now()
+ },
+ ConnectStart: func(_, _ string) {
+ if t.dnsDone.IsZero() {
+ t.dnsDone = time.Now()
+ }
+ if t.dnsStart.IsZero() {
+ t.dnsStart = t.dnsDone
+ }
+ },
+ ConnectDone: func(net, addr string, err error) {
+ t.connectDone = time.Now()
+ },
+ GetConn: func(_ string) {
+ t.getConn = time.Now()
+ },
+ GotConn: func(ci httptrace.GotConnInfo) {
+ t.gotConn = time.Now()
+ t.gotConnInfo = ci
+ },
+ GotFirstResponseByte: func() {
+ t.gotFirstResponseByte = time.Now()
+ },
+ TLSHandshakeStart: func() {
+ t.tlsHandshakeStart = time.Now()
+ },
+ TLSHandshakeDone: func(_ tls.ConnectionState, _ error) {
+ t.tlsHandshakeDone = time.Now()
+ },
+ },
+ )
+}
diff --git a/vendor/github.com/go-resty/resty/v2/transport.go b/vendor/github.com/go-resty/resty/v2/transport.go
new file mode 100644
index 0000000..e15b48c
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/transport.go
@@ -0,0 +1,35 @@
+// +build go1.13
+
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "net"
+ "net/http"
+ "runtime"
+ "time"
+)
+
+func createTransport(localAddr net.Addr) *http.Transport {
+ dialer := &net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }
+ if localAddr != nil {
+ dialer.LocalAddr = localAddr
+ }
+ return &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: dialer.DialContext,
+ ForceAttemptHTTP2: true,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1,
+ }
+}
diff --git a/vendor/github.com/go-resty/resty/v2/transport112.go b/vendor/github.com/go-resty/resty/v2/transport112.go
new file mode 100644
index 0000000..fbbbc59
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/transport112.go
@@ -0,0 +1,34 @@
+// +build !go1.13
+
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "net"
+ "net/http"
+ "runtime"
+ "time"
+)
+
+func createTransport(localAddr net.Addr) *http.Transport {
+ dialer := &net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }
+ if localAddr != nil {
+ dialer.LocalAddr = localAddr
+ }
+ return &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: dialer.DialContext,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1,
+ }
+}
diff --git a/vendor/github.com/go-resty/resty/v2/util.go b/vendor/github.com/go-resty/resty/v2/util.go
new file mode 100644
index 0000000..1d563be
--- /dev/null
+++ b/vendor/github.com/go-resty/resty/v2/util.go
@@ -0,0 +1,391 @@
+// Copyright (c) 2015-2021 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
+// resty source code and usage is governed by a MIT style
+// license that can be found in the LICENSE file.
+
+package resty
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "log"
+ "mime/multipart"
+ "net/http"
+ "net/textproto"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "sort"
+ "strings"
+ "sync"
+)
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Logger interface
+//_______________________________________________________________________
+
+// Logger interface is to abstract the logging from Resty. It gives control to
+// the Resty users over the choice of logger.
+type Logger interface {
+ Errorf(format string, v ...interface{})
+ Warnf(format string, v ...interface{})
+ Debugf(format string, v ...interface{})
+}
+
+func createLogger() *logger {
+ l := &logger{l: log.New(os.Stderr, "", log.Ldate|log.Lmicroseconds)}
+ return l
+}
+
+var _ Logger = (*logger)(nil)
+
+type logger struct {
+ l *log.Logger
+}
+
+func (l *logger) Errorf(format string, v ...interface{}) {
+ l.output("ERROR RESTY "+format, v...)
+}
+
+func (l *logger) Warnf(format string, v ...interface{}) {
+ l.output("WARN RESTY "+format, v...)
+}
+
+func (l *logger) Debugf(format string, v ...interface{}) {
+ l.output("DEBUG RESTY "+format, v...)
+}
+
+func (l *logger) output(format string, v ...interface{}) {
+ if len(v) == 0 {
+ l.l.Print(format)
+ return
+ }
+ l.l.Printf(format, v...)
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Package Helper methods
+//_______________________________________________________________________
+
+// IsStringEmpty method tells whether the given string is empty or not.
+func IsStringEmpty(str string) bool {
+ return len(strings.TrimSpace(str)) == 0
+}
+
+// DetectContentType method is used to figure out the `Request.Body` content type for the request header.
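+//
+// For Example (an illustrative sketch; `User` is a hypothetical struct type):
+// DetectContentType(User{}) // JSON content type
+// DetectContentType("hello") // plain text content type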
+func DetectContentType(body interface{}) string {
+ contentType := plainTextType
+ kind := kindOf(body)
+ switch kind {
+ case reflect.Struct, reflect.Map:
+ contentType = jsonContentType
+ case reflect.String:
+ contentType = plainTextType
+ default:
+ if b, ok := body.([]byte); ok {
+ contentType = http.DetectContentType(b)
+ } else if kind == reflect.Slice {
+ contentType = jsonContentType
+ }
+ }
+
+ return contentType
+}
+
+// IsJSONType method checks whether the given content type is a JSON content type.
+func IsJSONType(ct string) bool {
+ return jsonCheck.MatchString(ct)
+}
+
+// IsXMLType method checks whether the given content type is an XML content type.
+func IsXMLType(ct string) bool {
+ return xmlCheck.MatchString(ct)
+}
+
+// Unmarshalc unmarshals content into the given object from JSON or XML, based on the content type.
+func Unmarshalc(c *Client, ct string, b []byte, d interface{}) (err error) {
+ if IsJSONType(ct) {
+ err = c.JSONUnmarshal(b, d)
+ } else if IsXMLType(ct) {
+ err = c.XMLUnmarshal(b, d)
+ }
+
+ return
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// RequestLog and ResponseLog type
+//_______________________________________________________________________
+
+// RequestLog struct is used to collect information from the resty request
+// instance for debug logging. It is sent to the request log callback before resty
+// actually logs the information.
+type RequestLog struct {
+ Header http.Header
+ Body string
+}
+
+// ResponseLog struct is used to collect information from the resty response
+// instance for debug logging. It is sent to the response log callback before resty
+// actually logs the information.
+type ResponseLog struct {
+ Header http.Header
+ Body string
+}
+
+//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
+// Package Unexported methods
+//_______________________________________________________________________
+
+// a way to disable HTML escaping on an opt-in basis
+func jsonMarshal(c *Client, r *Request, d interface{}) (*bytes.Buffer, error) {
+ if !r.jsonEscapeHTML || !c.jsonEscapeHTML {
+ return noescapeJSONMarshal(d)
+ }
+
+ data, err := c.JSONMarshal(d)
+ if err != nil {
+ return nil, err
+ }
+
+ buf := acquireBuffer()
+ _, _ = buf.Write(data)
+ return buf, nil
+}
+
+func firstNonEmpty(v ...string) string {
+ for _, s := range v {
+ if !IsStringEmpty(s) {
+ return s
+ }
+ }
+ return ""
+}
+
+var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"")
+
+func escapeQuotes(s string) string {
+ return quoteEscaper.Replace(s)
+}
+
+func createMultipartHeader(param, fileName, contentType string) textproto.MIMEHeader {
+ hdr := make(textproto.MIMEHeader)
+
+ var contentDispositionValue string
+ if IsStringEmpty(fileName) {
+ contentDispositionValue = fmt.Sprintf(`form-data; name="%s"`, param)
+ } else {
+ contentDispositionValue = fmt.Sprintf(`form-data; name="%s"; filename="%s"`,
+ param, escapeQuotes(fileName))
+ }
+ hdr.Set("Content-Disposition", contentDispositionValue)
+
+ if !IsStringEmpty(contentType) {
+ hdr.Set(hdrContentTypeKey, contentType)
+ }
+ return hdr
+}
+
+func addMultipartFormField(w *multipart.Writer, mf *MultipartField) error {
+ partWriter, err := w.CreatePart(createMultipartHeader(mf.Param, mf.FileName, mf.ContentType))
+ if err != nil {
+ return err
+ }
+
+ _, err = io.Copy(partWriter, mf.Reader)
+ return err
+}
+
+func writeMultipartFormFile(w *multipart.Writer, fieldName, fileName string, r io.Reader) error {
+ // Auto detect actual multipart content type
+ cbuf := make([]byte, 512)
+ size, err := r.Read(cbuf)
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ partWriter, err := w.CreatePart(createMultipartHeader(fieldName, fileName, http.DetectContentType(cbuf)))
+ if err != nil {
+ return err
+ }
+
+ if _, err = partWriter.Write(cbuf[:size]); err != nil {
+ return err
+ }
+
+ _, err = io.Copy(partWriter, r)
+ return err
+}
+
+func addFile(w *multipart.Writer, fieldName, path string) error {
+ file, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer closeq(file)
+ return writeMultipartFormFile(w, fieldName, filepath.Base(path), file)
+}
+
+func addFileReader(w *multipart.Writer, f *File) error {
+ return writeMultipartFormFile(w, f.ParamName, f.Name, f.Reader)
+}
+
+func getPointer(v interface{}) interface{} {
+ vv := valueOf(v)
+ if vv.Kind() == reflect.Ptr {
+ return v
+ }
+ return reflect.New(vv.Type()).Interface()
+}
+
+func isPayloadSupported(m string, allowMethodGet bool) bool {
+ return !(m == MethodHead || m == MethodOptions || (m == MethodGet && !allowMethodGet))
+}
+
+func typeOf(i interface{}) reflect.Type {
+ return indirect(valueOf(i)).Type()
+}
+
+func valueOf(i interface{}) reflect.Value {
+ return reflect.ValueOf(i)
+}
+
+func indirect(v reflect.Value) reflect.Value {
+ return reflect.Indirect(v)
+}
+
+func kindOf(v interface{}) reflect.Kind {
+ return typeOf(v).Kind()
+}
+
+func createDirectory(dir string) (err error) {
+ if _, err = os.Stat(dir); err != nil {
+ if os.IsNotExist(err) {
+ if err = os.MkdirAll(dir, 0755); err != nil {
+ return
+ }
+ }
+ }
+ return
+}
+
+func canJSONMarshal(contentType string, kind reflect.Kind) bool {
+ return IsJSONType(contentType) && (kind == reflect.Struct || kind == reflect.Map || kind == reflect.Slice)
+}
+
+func functionName(i interface{}) string {
+ return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()
+}
+
+func acquireBuffer() *bytes.Buffer {
+ return bufPool.Get().(*bytes.Buffer)
+}
+
+func releaseBuffer(buf *bytes.Buffer) {
+ if buf != nil {
+ buf.Reset()
+ bufPool.Put(buf)
+ }
+}
+
+// requestBodyReleaser wraps the request's body and implements a custom Close for it.
+// The Close method closes the original body and releases the request body buffer back to the sync.Pool.
+type requestBodyReleaser struct {
+ releaseOnce sync.Once
+ reqBuf *bytes.Buffer
+ io.ReadCloser
+}
+
+func newRequestBodyReleaser(respBody io.ReadCloser, reqBuf *bytes.Buffer) io.ReadCloser {
+ if reqBuf == nil {
+ return respBody
+ }
+
+ return &requestBodyReleaser{
+ reqBuf: reqBuf,
+ ReadCloser: respBody,
+ }
+}
+
+func (rr *requestBodyReleaser) Close() error {
+ err := rr.ReadCloser.Close()
+ rr.releaseOnce.Do(func() {
+ releaseBuffer(rr.reqBuf)
+ })
+
+ return err
+}
+
+func closeq(v interface{}) {
+ if c, ok := v.(io.Closer); ok {
+ silently(c.Close())
+ }
+}
+
+func silently(_ ...interface{}) {}
+
+func composeHeaders(c *Client, r *Request, hdrs http.Header) string {
+ str := make([]string, 0, len(hdrs))
+ for _, k := range sortHeaderKeys(hdrs) {
+ var v string
+ if k == "Cookie" {
+ cv := strings.TrimSpace(strings.Join(hdrs[k], ", "))
+ if c.GetClient().Jar != nil {
+ for _, c := range c.GetClient().Jar.Cookies(r.RawRequest.URL) {
+ if cv != "" {
+ cv = cv + "; " + c.String()
+ } else {
+ cv = c.String()
+ }
+ }
+ }
+ v = strings.TrimSpace(fmt.Sprintf("%25s: %s", k, cv))
+ } else {
+ v = strings.TrimSpace(fmt.Sprintf("%25s: %s", k, strings.Join(hdrs[k], ", ")))
+ }
+ if v != "" {
+ str = append(str, "\t"+v)
+ }
+ }
+ return strings.Join(str, "\n")
+}
+
+func sortHeaderKeys(hdrs http.Header) []string {
+ keys := make([]string, 0, len(hdrs))
+ for key := range hdrs {
+ keys = append(keys, key)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+func copyHeaders(hdrs http.Header) http.Header {
+ nh := http.Header{}
+ for k, v := range hdrs {
+ nh[k] = v
+ }
+ return nh
+}
+
+type noRetryErr struct {
+ err error
+}
+
+func (e *noRetryErr) Error() string {
+ return e.err.Error()
+}
+
+func wrapNoRetryErr(err error) error {
+ if err != nil {
+ err = &noRetryErr{err: err}
+ }
+ return err
+}
+
+func unwrapNoRetryErr(err error) error {
+ if e, ok := err.(*noRetryErr); ok {
+ err = e.err
+ }
+ return err
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/.gitignore b/vendor/github.com/go-sql-driver/mysql/.gitignore
new file mode 100644
index 0000000..2de28da
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/.gitignore
@@ -0,0 +1,9 @@
+.DS_Store
+.DS_Store?
+._*
+.Spotlight-V100
+.Trashes
+Icon?
+ehthumbs.db
+Thumbs.db
+.idea
diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS
new file mode 100644
index 0000000..50afa2c
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS
@@ -0,0 +1,117 @@
+# This is the official list of Go-MySQL-Driver authors for copyright purposes.
+
+# If you are submitting a patch, please add your name or the name of the
+# organization which holds the copyright to this list in alphabetical order.
+
+# Names should be added to this file as
+# Name <email address>
+# The email address is not required for organizations.
+# Please keep the list sorted.
+
+
+# Individual Persons
+
+Aaron Hopkins
+Achille Roussel
+Alex Snast
+Alexey Palazhchenko
+Andrew Reid
+Animesh Ray
+Arne Hormann
+Ariel Mashraki
+Asta Xie
+Bulat Gaifullin
+Caine Jette
+Carlos Nieto
+Chris Moos
+Craig Wilson
+Daniel Montoya
+Daniel Nichter
+Daniël van Eeden
+Dave Protasowski
+DisposaBoy
+Egor Smolyakov
+Erwan Martin
+Evan Shaw
+Frederick Mayle
+Gustavo Kristic
+Hajime Nakagami
+Hanno Braun
+Henri Yandell
+Hirotaka Yamamoto
+Huyiguang
+ICHINOSE Shogo
+Ilia Cimpoes
+INADA Naoki
+Jacek Szwec
+James Harr
+Jeff Hodges
+Jeffrey Charles
+Jerome Meyer
+Jiajia Zhong
+Jian Zhen
+Joshua Prunier
+Julien Lefevre
+Julien Schmidt
+Justin Li
+Justin Nuß
+Kamil Dziedzic
+Kei Kamikawa
+Kevin Malachowski
+Kieron Woodhouse
+Lennart Rudolph
+Leonardo YongUk Kim
+Linh Tran Tuan
+Lion Yang
+Luca Looz
+Lucas Liu
+Luke Scott
+Maciej Zimnoch
+Michael Woolnough
+Nathanial Murphy
+Nicola Peduzzi
+Olivier Mengué
+oscarzhao
+Paul Bonser
+Peter Schultz
+Rebecca Chin
+Reed Allman
+Richard Wilkes
+Robert Russell
+Runrioter Wung
+Sho Iizuka
+Sho Ikeda
+Shuode Li
+Simon J Mudd
+Soroush Pour
+Stan Putrya
+Stanley Gunawan
+Steven Hartland
+Tan Jinhua <312841925 at qq.com>
+Thomas Wodarek
+Tim Ruffles
+Tom Jenkinson
+Vladimir Kovpak
+Vladyslav Zhelezniak
+Xiangyu Hu
+Xiaobing Jiang
+Xiuming Chen
+Xuehong Chan
+Zhenye Xie
+Zhixin Wen
+
+# Organizations
+
+Barracuda Networks, Inc.
+Counting Ltd.
+DigitalOcean Inc.
+Facebook Inc.
+GitHub Inc.
+Google Inc.
+InfoSum Ltd.
+Keybase Inc.
+Multiplay Ltd.
+Percona LLC
+Pivotal Inc.
+Stripe Inc.
+Zendesk Inc.
diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
new file mode 100644
index 0000000..72a738e
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
@@ -0,0 +1,232 @@
+## Version 1.6 (2021-04-01)
+
+Changes:
+
+ - Migrate the CI service from travis-ci to GitHub Actions (#1176, #1183, #1190)
+ - `NullTime` is deprecated (#960, #1144)
+ - Reduce allocations when building SET command (#1111)
+ - Performance improvement for time formatting (#1118)
+ - Performance improvement for time parsing (#1098, #1113)
+
+New Features:
+
+ - Implement `driver.Validator` interface (#1106, #1174)
+ - Support returning `uint64` from `Valuer` in `ConvertValue` (#1143)
+ - Add `json.RawMessage` for converter and prepared statement (#1059)
+ - Interpolate `json.RawMessage` as `string` (#1058)
+ - Implements `CheckNamedValue` (#1090)
+
+Bugfixes:
+
+ - Stop rounding times (#1121, #1172)
+ - Put zero filler into the SSL handshake packet (#1066)
+ - Fix checking cancelled connections back into the connection pool (#1095)
+ - Fix remove last 0 byte for mysql_old_password when password is empty (#1133)
+
+
+## Version 1.5 (2020-01-07)
+
+Changes:
+
+ - Dropped support Go 1.9 and lower (#823, #829, #886, #1016, #1017)
+ - Improve buffer handling (#890)
+ - Document potentially insecure TLS configs (#901)
+ - Use a double-buffering scheme to prevent data races (#943)
+ - Pass uint64 values without converting them to string (#838, #955)
+ - Update collations and make utf8mb4 default (#877, #1054)
+ - Make NullTime compatible with sql.NullTime in Go 1.13+ (#995)
+ - Removed CloudSQL support (#993, #1007)
+ - Add Go Module support (#1003)
+
+New Features:
+
+ - Implement support of optional TLS (#900)
+ - Check connection liveness (#934, #964, #997, #1048, #1051, #1052)
+ - Implement Connector Interface (#941, #958, #1020, #1035)
+
+Bugfixes:
+
+ - Mark connections as bad on error during ping (#875)
+ - Mark connections as bad on error during dial (#867)
+ - Fix connection leak caused by rapid context cancellation (#1024)
+ - Mark connections as bad on error during Conn.Prepare (#1030)
+
+
+## Version 1.4.1 (2018-11-14)
+
+Bugfixes:
+
+ - Fix TIME format for binary columns (#818)
+ - Fix handling of empty auth plugin names (#835)
+ - Fix caching_sha2_password with empty password (#826)
+ - Fix canceled context broke mysqlConn (#862)
+ - Fix OldAuthSwitchRequest support (#870)
+ - Fix Auth Response packet for cleartext password (#887)
+
+## Version 1.4 (2018-06-03)
+
+Changes:
+
+ - Documentation fixes (#530, #535, #567)
+ - Refactoring (#575, #579, #580, #581, #603, #615, #704)
+ - Cache column names (#444)
+ - Sort the DSN parameters in DSNs generated from a config (#637)
+ - Allow native password authentication by default (#644)
+ - Use the default port if it is missing in the DSN (#668)
+ - Removed the `strict` mode (#676)
+ - Do not query `max_allowed_packet` by default (#680)
+ - Dropped support Go 1.6 and lower (#696)
+ - Updated `ConvertValue()` to match the database/sql/driver implementation (#760)
+ - Document the usage of `0000-00-00T00:00:00` as the time.Time zero value (#783)
+ - Improved the compatibility of the authentication system (#807)
+
+New Features:
+
+ - Multi-Results support (#537)
+ - `rejectReadOnly` DSN option (#604)
+ - `context.Context` support (#608, #612, #627, #761)
+ - Transaction isolation level support (#619, #744)
+ - Read-Only transactions support (#618, #634)
+ - `NewConfig` function which initializes a config with default values (#679)
+ - Implemented the `ColumnType` interfaces (#667, #724)
+ - Support for custom string types in `ConvertValue` (#623)
+ - Implemented `NamedValueChecker`, improving support for uint64 with high bit set (#690, #709, #710)
+ - `caching_sha2_password` authentication plugin support (#794, #800, #801, #802)
+ - Implemented `driver.SessionResetter` (#779)
+ - `sha256_password` authentication plugin support (#808)
+
+Bugfixes:
+
+ - Use the DSN hostname as TLS default ServerName if `tls=true` (#564, #718)
+ - Fixed LOAD LOCAL DATA INFILE for empty files (#590)
+ - Removed columns definition cache since it sometimes cached invalid data (#592)
+ - Don't mutate registered TLS configs (#600)
+ - Make RegisterTLSConfig concurrency-safe (#613)
+ - Handle missing auth data in the handshake packet correctly (#646)
+ - Do not retry queries when data was written to avoid data corruption (#302, #736)
+ - Cache the connection pointer for error handling before invalidating it (#678)
+ - Fixed imports for appengine/cloudsql (#700)
+ - Fix sending STMT_LONG_DATA for 0 byte data (#734)
+ - Set correct capacity for []bytes read from length-encoded strings (#766)
+ - Make RegisterDial concurrency-safe (#773)
+
+
+## Version 1.3 (2016-12-01)
+
+Changes:
+
+ - Go 1.1 is no longer supported
+ - Use decimals fields in MySQL to format time types (#249)
+ - Buffer optimizations (#269)
+ - TLS ServerName defaults to the host (#283)
+ - Refactoring (#400, #410, #437)
+ - Adjusted documentation for second generation CloudSQL (#485)
+ - Documented DSN system var quoting rules (#502)
+ - Made statement.Close() calls idempotent to avoid errors in Go 1.6+ (#512)
+
+New Features:
+
+ - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
+ - Support for returning table alias on Columns() (#289, #359, #382)
+ - Placeholder interpolation, can be activated with the DSN parameter `interpolateParams=true` (#309, #318, #490)
+ - Support for uint64 parameters with high bit set (#332, #345)
+ - Cleartext authentication plugin support (#327)
+ - Exported ParseDSN function and the Config struct (#403, #419, #429)
+ - Read / Write timeouts (#401)
+ - Support for JSON field type (#414)
+ - Support for multi-statements and multi-results (#411, #431)
+ - DSN parameter to set the driver-side max_allowed_packet value manually (#489)
+ - Native password authentication plugin support (#494, #524)
+
+Bugfixes:
+
+ - Fixed handling of queries without columns and rows (#255)
+ - Fixed a panic when SetKeepAlive() failed (#298)
+ - Handle ERR packets while reading rows (#321)
+ - Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349)
+ - Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356)
+ - Actually zero out bytes in handshake response (#378)
+ - Fixed race condition in registering LOAD DATA INFILE handler (#383)
+ - Fixed tests with MySQL 5.7.9+ (#380)
+ - QueryUnescape TLS config names (#397)
+ - Fixed "broken pipe" error by writing to closed socket (#390)
+ - Fixed LOAD LOCAL DATA INFILE buffering (#424)
+ - Fixed parsing of floats into float64 when placeholders are used (#434)
+ - Fixed DSN tests with Go 1.7+ (#459)
+ - Handle ERR packets while waiting for EOF (#473)
+ - Invalidate connection on error while discarding additional results (#513)
+ - Allow terminating packets of length 0 (#516)
+
+
+## Version 1.2 (2014-06-03)
+
+Changes:
+
+ - We switched back to a "rolling release". `go get` installs the current master branch again
+ - Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver
+ - Exported errors to allow easy checking from application code
+ - Enabled TCP Keepalives on TCP connections
+ - Optimized INFILE handling (better buffer size calculation, lazy init, ...)
+ - The DSN parser also checks for a missing separating slash
+ - Faster binary date / datetime to string formatting
+ - Also exported the MySQLWarning type
+ - mysqlConn.Close returns the first error encountered instead of ignoring all errors
+ - writePacket() automatically writes the packet size to the header
+ - readPacket() uses an iterative approach instead of the recursive approach to merge split packets
+
+New Features:
+
+ - `RegisterDial` allows the usage of a custom dial function to establish the network connection
+ - Setting the connection collation is possible with the `collation` DSN parameter. This parameter should be preferred over the `charset` parameter
+ - Logging of critical errors is configurable with `SetLogger`
+ - Google CloudSQL support
+
+Bugfixes:
+
+ - Allow more than 32 parameters in prepared statements
+ - Various old_password fixes
+ - Fixed TestConcurrent test to pass Go's race detection
+ - Fixed appendLengthEncodedInteger for large numbers
+ - Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo)
+
+
+## Version 1.1 (2013-11-02)
+
+Changes:
+
+ - Go-MySQL-Driver now requires Go 1.1
+ - Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore
+ - Made closing rows and connections error tolerant. This allows for example deferring rows.Close() without checking for errors
+ - `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")`
+ - DSN parameter values must now be url.QueryEscape'ed. This allows text values to contain special characters, such as '&'.
+ - Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries
+ - Optimized the buffer for reading
+ - stmt.Query now caches column metadata
+ - New Logo
+ - Changed the copyright header to include all contributors
+ - Improved the LOAD INFILE documentation
+ - The driver struct is now exported to make the driver directly accessible
+ - Refactored the driver tests
+ - Added more benchmarks and moved all to a separate file
+ - Other small refactoring
+
+New Features:
+
+ - Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure
+ - Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs
+ - Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. Custom TLS configs can be registered and used
+
+Bugfixes:
+
+ - Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification
+ - Convert to DB timezone when inserting `time.Time`
+ - Split packets (more than 16MB) are now merged correctly
+ - Fixed false positive `io.EOF` errors when the data was fully read
+ - Avoid panics on reuse of closed connections
+ - Fixed empty string producing false nil values
+ - Fixed sign byte for positive TIME fields
+
+
+## Version 1.0 (2013-05-14)
+
+Initial Release
diff --git a/vendor/github.com/go-sql-driver/mysql/LICENSE b/vendor/github.com/go-sql-driver/mysql/LICENSE
new file mode 100644
index 0000000..14e2f77
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/LICENSE
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md
new file mode 100644
index 0000000..0b13154
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/README.md
@@ -0,0 +1,520 @@
+# Go-MySQL-Driver
+
+A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) package
+
+![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin")
+
+---------------------------------------
+ * [Features](#features)
+ * [Requirements](#requirements)
+ * [Installation](#installation)
+ * [Usage](#usage)
+ * [DSN (Data Source Name)](#dsn-data-source-name)
+ * [Password](#password)
+ * [Protocol](#protocol)
+ * [Address](#address)
+ * [Parameters](#parameters)
+ * [Examples](#examples)
+ * [Connection pool and timeouts](#connection-pool-and-timeouts)
+ * [context.Context Support](#contextcontext-support)
+ * [ColumnType Support](#columntype-support)
+ * [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support)
+ * [time.Time support](#timetime-support)
+ * [Unicode support](#unicode-support)
+ * [Testing / Development](#testing--development)
+ * [License](#license)
+
+---------------------------------------
+
+## Features
+ * Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance")
+ * Native Go implementation. No C-bindings, just pure Go
+ * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](https://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
+ * Automatic handling of broken connections
+ * Automatic Connection Pooling *(by database/sql package)*
+ * Supports queries larger than 16MB
+ * Full [`sql.RawBytes`](https://golang.org/pkg/database/sql/#RawBytes) support.
+ * Intelligent `LONG DATA` handling in prepared statements
+ * Secure `LOAD DATA LOCAL INFILE` support with file allowlisting and `io.Reader` support
+ * Optional `time.Time` parsing
+ * Optional placeholder interpolation
+
+## Requirements
+ * Go 1.10 or higher. We aim to support the 3 latest versions of Go.
+ * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
+
+---------------------------------------
+
+## Installation
+Simply install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell:
+```bash
+$ go get -u github.com/go-sql-driver/mysql
+```
+Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`.
+
+## Usage
+_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver; you can then use the full [`database/sql`](https://golang.org/pkg/database/sql/) API.
+
+Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`:
+
+```go
+import (
+ "database/sql"
+ "time"
+
+ _ "github.com/go-sql-driver/mysql"
+)
+
+// ...
+
+db, err := sql.Open("mysql", "user:password@/dbname")
+if err != nil {
+ panic(err)
+}
+// See "Important settings" section.
+db.SetConnMaxLifetime(time.Minute * 3)
+db.SetMaxOpenConns(10)
+db.SetMaxIdleConns(10)
+```
+
+[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples").
+
+### Important settings
+
+`db.SetConnMaxLifetime()` is required to ensure connections are closed by the driver safely before they are closed by the MySQL server, the OS, or other middleware. Since some middleware closes idle connections after 5 minutes, we recommend a timeout shorter than 5 minutes. This setting also helps with load balancing and changing system variables.
+
+`db.SetMaxOpenConns()` is highly recommended to limit the number of connections used by the application. There is no recommended value, since it depends on the application and the MySQL server.
+
+`db.SetMaxIdleConns()` is recommended to be set to the same value as (or greater than) `db.SetMaxOpenConns()`. When it is smaller than `SetMaxOpenConns()`, connections can be opened and closed much more frequently than you might expect. Idle connections can be closed by `db.SetConnMaxLifetime()`. If you want to close idle connections more quickly, you can use `db.SetConnMaxIdleTime()` since Go 1.15.
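+
+As a rough sketch, continuing the snippet from the Usage section above (the values are illustrative, not recommendations):
+
+```go
+// Illustrative pool configuration; tune the values for your workload.
+db.SetConnMaxLifetime(3 * time.Minute) // shorter than common 5 minute middleware idle timeouts
+db.SetMaxOpenConns(10)                 // hard cap on connections to the server
+db.SetMaxIdleConns(10)                 // same as SetMaxOpenConns to avoid connection churn
+db.SetConnMaxIdleTime(time.Minute)     // Go 1.15+: close idle connections sooner if desired
+```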
+
+
+### DSN (Data Source Name)
+
+The Data Source Name has a common format, like the one used e.g. by [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php), but without a type prefix (optional parts are marked by square brackets):
+```
+[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...&paramN=valueN]
+```
+
+A DSN in its fullest form:
+```
+username:password@protocol(address)/dbname?param=value
+```
+
+Except for the database name, all values are optional. So the minimal DSN is:
+```
+/dbname
+```
+
+If you do not want to preselect a database, leave `dbname` empty:
+```
+/
+```
+This has the same effect as an empty DSN string:
+```
+
+```
+
+Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct.
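+
+As a minimal sketch, a DSN can be built from a `Config` like this (assuming the package is imported without the blank identifier as `mysql`):
+
+```go
+cfg := mysql.NewConfig() // initializes the config with default values
+cfg.User = "user"
+cfg.Passwd = "password"
+cfg.Net = "tcp"
+cfg.Addr = "127.0.0.1:3306"
+cfg.DBName = "dbname"
+
+db, err := sql.Open("mysql", cfg.FormatDSN())
+```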
+
+#### Password
+Passwords can consist of any character. Escaping is **not** necessary.
+
+#### Protocol
+See [net.Dial](https://golang.org/pkg/net/#Dial) for more information about which networks are available.
+In general you should use a Unix domain socket if available, and TCP otherwise, for best performance.
+
+#### Address
+For TCP and UDP networks, addresses have the form `host[:port]`.
+If `port` is omitted, the default port will be used.
+If `host` is a literal IPv6 address, it must be enclosed in square brackets.
+The functions [net.JoinHostPort](https://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](https://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
+
+For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`.
+
+#### Parameters
+*Parameters are case-sensitive!*
+
+Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`.
+
+##### `allowAllFiles`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`allowAllFiles=true` disables the file allowlist for `LOAD DATA LOCAL INFILE` and allows *all* files.
+[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)
+
+##### `allowCleartextPasswords`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`allowCleartextPasswords=true` allows using the [cleartext client side plugin](https://dev.mysql.com/doc/en/cleartext-pluggable-authentication.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network.
+
+##### `allowNativePasswords`
+
+```
+Type: bool
+Valid Values: true, false
+Default: true
+```
+`allowNativePasswords=false` disallows the usage of the MySQL native password method.
+
+##### `allowOldPasswords`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords).
+
+##### `charset`
+
+```
+Type: string
+Valid Values: <name>
+Default: none
+```
+
+Sets the charset used for client-server interaction (`"SET NAMES <value>"`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset fails. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
+
+Usage of the `charset` parameter is discouraged because it issues additional queries to the server.
+Unless you need the fallback behavior, please use `collation` instead.
+
+##### `checkConnLiveness`
+
+```
+Type: bool
+Valid Values: true, false
+Default: true
+```
+
+On supported platforms connections retrieved from the connection pool are checked for liveness before using them. If the check fails, the respective connection is marked as bad and the query retried with another connection.
+`checkConnLiveness=false` disables this liveness check of connections.
+
+##### `collation`
+
+```
+Type: string
+Valid Values: <name>
+Default: utf8mb4_general_ci
+```
+
+Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail.
+
+A list of valid collations for a server is retrievable with `SHOW COLLATION`.
+
+The default collation (`utf8mb4_general_ci`) is supported from MySQL 5.5. You should use an older collation (e.g. `utf8_general_ci`) for older MySQL.
+
+Collations for charset "ucs2", "utf16", "utf16le", and "utf32" can not be used ([ref](https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset)).
+
+
+##### `clientFoundRows`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed.
+
+##### `columnsWithAlias`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example:
+
+```
+SELECT u.id FROM users as u
+```
+
+will return `u.id` instead of just `id` if `columnsWithAlias=true`.
+
+##### `interpolateParams`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with the given parameters. This reduces the number of roundtrips, since with `interpolateParams=false` the driver has to prepare a statement, execute it with the given parameters, and close the statement again.
+
+*This can not be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are rejected as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!*
+
+##### `loc`
+
+```
+Type: string
+Valid Values: <escaped name>
+Default: UTC
+```
+
+Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](https://golang.org/pkg/time/#LoadLocation) for details.
+
+Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter.
+
+Please keep in mind that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
+
+##### `maxAllowedPacket`
+```
+Type: decimal number
+Default: 4194304
+```
+
+Max packet size allowed in bytes. The default value is 4 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from server *on every connection*.
+
+##### `multiStatements`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned, all other results are silently discarded.
+
+When `multiStatements` is used, `?` parameters must only be used in the first statement.
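+
+A minimal sketch of a batch statement using this option (the table and statements are illustrative):
+
+```go
+// The DSN must contain multiStatements=true.
+db, err := sql.Open("mysql", "user:password@/dbname?multiStatements=true")
+if err != nil {
+ panic(err)
+}
+// Only the result of the first statement is returned; the rest are discarded.
+if _, err = db.Exec("UPDATE t SET a = 1; UPDATE t SET b = 2"); err != nil {
+ panic(err)
+}
+```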
+
+##### `parseTime`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string`.
+A date or datetime value like `0000-00-00 00:00:00` is converted into the zero value of `time.Time`.
+
+
+##### `readTimeout`
+
+```
+Type: duration
+Default: 0
+```
+
+I/O read timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
+##### `rejectReadOnly`
+
+```
+Type: bool
+Valid Values: true, false
+Default: false
+```
+
+
+`rejectReadOnly=true` causes the driver to reject read-only connections. This
+is for a possible race condition during an automatic failover, where the mysql
+client gets connected to a read-only replica after the failover.
+
+Note that this should be a fairly rare case, as an automatic failover normally
+happens when the primary is down, and the race condition shouldn't happen
+unless it comes back up online as soon as the failover is kicked off. On the
+other hand, when this happens, a MySQL application can get stuck on a
+read-only connection until restarted. It is however fairly easy to reproduce,
+for example, using a manual failover on AWS Aurora's MySQL-compatible cluster.
+
+If you are not relying on read-only transactions to reject writes that aren't
+supposed to happen, setting this on some MySQL providers (such as AWS Aurora)
+is safer for failovers.
+
+Note that ERROR 1290 can be returned for a `read-only` server and this option will
+cause a retry for that error. However, the same error number is used for some
+other cases. You should ensure your application will never cause an ERROR 1290
+except for `read-only` mode when enabling this option.
+
+
+##### `serverPubKey`
+
+```
+Type: string
+Valid Values: <name>
+Default: none
+```
+
+Server public keys can be registered with [`mysql.RegisterServerPubKey`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterServerPubKey), which can then be used by the assigned name in the DSN.
+Public keys are used to transmit encrypted data, e.g. for authentication.
+If the server's public key is known, it should be set manually to avoid expensive and potentially insecure transmissions of the public key from the server to the client each time it is required.
+
+
+##### `timeout`
+
+```
+Type: duration
+Default: OS default
+```
+
+Timeout for establishing connections, aka dial timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
+
+##### `tls`
+
+```
+Type: bool / string
+Valid Values: true, false, skip-verify, preferred, <name>
+Default: false
+```
+
+`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side) or use `preferred` to use TLS only when advertised by the server. This is similar to `skip-verify`, but additionally allows a fallback to a connection which is not encrypted. Neither `skip-verify` nor `preferred` add any reliable security. You can use a custom TLS config after registering it with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
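+
+A minimal sketch of registering and using a custom TLS config (the certificate path is a placeholder):
+
+```go
+rootCertPool := x509.NewCertPool()
+pem, err := ioutil.ReadFile("/path/ca-cert.pem")
+if err != nil {
+ log.Fatal(err)
+}
+if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
+ log.Fatal("failed to append PEM")
+}
+mysql.RegisterTLSConfig("custom", &tls.Config{RootCAs: rootCertPool})
+
+db, err := sql.Open("mysql", "user:password@tcp(host:3306)/dbname?tls=custom")
+```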
+
+
+##### `writeTimeout`
+
+```
+Type: duration
+Default: 0
+```
+
+I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
+
+##### System Variables
+
+Any other parameters are interpreted as system variables:
+ * `<boolean_var>=<value>`: `SET <boolean_var>=<value>`
+ * `<enum_var>=<value>`: `SET <enum_var>=<value>`
+ * `<string_var>=%27<value>%27`: `SET <string_var>='<value>'`
+
+Rules:
+* The values for string variables must be quoted with `'`.
+* The values must also be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!
+ (which implies values of string variables must be wrapped with `%27`).
+
+Examples:
+ * `autocommit=1`: `SET autocommit=1`
+ * [`time_zone=%27Europe%2FParis%27`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `SET time_zone='Europe/Paris'`
+ * [`transaction_isolation=%27REPEATABLE-READ%27`](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_transaction_isolation): `SET transaction_isolation='REPEATABLE-READ'`
+
+
+#### Examples
+```
+user@unix(/path/to/socket)/dbname
+```
+
+```
+root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local
+```
+
+```
+user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true
+```
+
+Treat warnings as errors by setting the system variable [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html):
+```
+user:password@/dbname?sql_mode=TRADITIONAL
+```
+
+TCP via IPv6:
+```
+user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci
+```
+
+TCP on a remote host, e.g. Amazon RDS:
+```
+id:password@tcp(your-amazonaws-uri.com:3306)/dbname
+```
+
+Google Cloud SQL on App Engine:
+```
+user:password@unix(/cloudsql/project-id:region-name:instance-name)/dbname
+```
+
+TCP using default port (3306) on localhost:
+```
+user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped
+```
+
+Use the default protocol (tcp) and host (localhost:3306):
+```
+user:password@/dbname
+```
+
+No Database preselected:
+```
+user:password@/
+```
+
+
+### Connection pool and timeouts
+The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively.
+
+### `ColumnType` Support
+This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported.
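+
+For illustration, column metadata can be inspected as in this sketch (the query is a placeholder):
+
+```go
+rows, err := db.Query("SELECT id, name FROM users")
+if err != nil {
+ log.Fatal(err)
+}
+defer rows.Close()
+
+types, err := rows.ColumnTypes()
+if err != nil {
+ log.Fatal(err)
+}
+for _, ct := range types {
+ // e.g. "id INT int32" and "name VARCHAR sql.RawBytes"
+ log.Println(ct.Name(), ct.DatabaseTypeName(), ct.ScanType())
+}
+```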
+
+### `context.Context` Support
+Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
+See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details.
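+
+A minimal sketch of a query with a deadline:
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+defer cancel()
+
+// The query is cancelled once the context deadline expires.
+rows, err := db.QueryContext(ctx, "SELECT SLEEP(10)")
+if err != nil {
+ log.Fatal(err) // e.g. "context deadline exceeded"
+}
+defer rows.Close()
+```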
+
+
+### `LOAD DATA LOCAL INFILE` support
+For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
+```go
+import "github.com/go-sql-driver/mysql"
+```
+
+Files must be explicitly allowed by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the allowlist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)).
+
+To use an `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns an `io.Reader` or `io.ReadCloser`. The Reader is then available with the filepath `Reader::<name>`. Choose different names for different handlers and call `DeregisterReaderHandler` when you don't need a handler anymore.
+
+See the [godoc of Go-MySQL-Driver](https://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
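+
+A short sketch of both mechanisms (paths, table, and handler name are illustrative):
+
+```go
+// Allowlist a concrete file for LOAD DATA LOCAL INFILE.
+mysql.RegisterLocalFile("/home/gopher/data.csv")
+if _, err := db.Exec("LOAD DATA LOCAL INFILE '/home/gopher/data.csv' INTO TABLE test"); err != nil {
+ log.Fatal(err)
+}
+
+// Or stream the data from an io.Reader under a registered handler name.
+mysql.RegisterReaderHandler("data", func() io.Reader {
+ return strings.NewReader("1\n2\n3\n")
+})
+if _, err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE test"); err != nil {
+ log.Fatal(err)
+}
+```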
+
+
+### `time.Time` support
+The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program.
+
+However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical equivalent in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](https://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
+
+**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).
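+
+A minimal sketch of scanning into `time.Time` (the table and column are illustrative):
+
+```go
+db, err := sql.Open("mysql", "user:password@/dbname?parseTime=true&loc=Local")
+if err != nil {
+ panic(err)
+}
+
+var created time.Time
+if err = db.QueryRow("SELECT created_at FROM events WHERE id = ?", 1).Scan(&created); err != nil {
+ log.Fatal(err)
+}
+```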
+
+
+### Unicode support
+Since version 1.5 Go-MySQL-Driver uses the collation `utf8mb4_general_ci` by default.
+
+Other collations / charsets can be set using the [`collation`](#collation) DSN parameter.
+
+Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred for setting a collation / charset other than the default.
+
+See http://dev.mysql.com/doc/refman/8.0/en/charset-unicode.html for more details on MySQL's Unicode support.
+
+## Testing / Development
+To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details.
+
+Go-MySQL-Driver is not feature-complete yet. Your help is greatly appreciated.
+If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls).
+
+See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/.github/CONTRIBUTING.md) for details.
+
+---------------------------------------
+
+## License
+Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
+
+Mozilla summarizes the license scope as follows:
+> MPL: The copyleft applies to any files containing MPLed code.
+
+
+That means:
+ * You can **use** the **unchanged** source code both in private and commercially.
+ * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0).
+ * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**.
+
+Please read the [MPL 2.0 FAQ](https://www.mozilla.org/en-US/MPL/2.0/FAQ/) if you have further questions regarding the license.
+
+You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE).
+
+![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow")
diff --git a/vendor/github.com/go-sql-driver/mysql/auth.go b/vendor/github.com/go-sql-driver/mysql/auth.go
new file mode 100644
index 0000000..b2f19e8
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/auth.go
@@ -0,0 +1,425 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+ "sync"
+)
+
+// server pub keys registry
+var (
+ serverPubKeyLock sync.RWMutex
+ serverPubKeyRegistry map[string]*rsa.PublicKey
+)
+
+// RegisterServerPubKey registers a server RSA public key which can be used to
+// send data in a secure manner to the server without receiving the public key
+// in a potentially insecure way from the server first.
+// Registered keys can afterwards be used by adding serverPubKey=<name> to the DSN.
+//
+// Note: The provided rsa.PublicKey instance is exclusively owned by the driver
+// after registering it and may not be modified.
+//
+// data, err := ioutil.ReadFile("mykey.pem")
+// if err != nil {
+// log.Fatal(err)
+// }
+//
+// block, _ := pem.Decode(data)
+// if block == nil || block.Type != "PUBLIC KEY" {
+// log.Fatal("failed to decode PEM block containing public key")
+// }
+//
+// pub, err := x509.ParsePKIXPublicKey(block.Bytes)
+// if err != nil {
+// log.Fatal(err)
+// }
+//
+// if rsaPubKey, ok := pub.(*rsa.PublicKey); ok {
+// mysql.RegisterServerPubKey("mykey", rsaPubKey)
+// } else {
+// log.Fatal("not a RSA public key")
+// }
+//
+func RegisterServerPubKey(name string, pubKey *rsa.PublicKey) {
+ serverPubKeyLock.Lock()
+ if serverPubKeyRegistry == nil {
+ serverPubKeyRegistry = make(map[string]*rsa.PublicKey)
+ }
+
+ serverPubKeyRegistry[name] = pubKey
+ serverPubKeyLock.Unlock()
+}
+
+// DeregisterServerPubKey removes the public key registered with the given name.
+func DeregisterServerPubKey(name string) {
+ serverPubKeyLock.Lock()
+ if serverPubKeyRegistry != nil {
+ delete(serverPubKeyRegistry, name)
+ }
+ serverPubKeyLock.Unlock()
+}
+
+func getServerPubKey(name string) (pubKey *rsa.PublicKey) {
+ serverPubKeyLock.RLock()
+ if v, ok := serverPubKeyRegistry[name]; ok {
+ pubKey = v
+ }
+ serverPubKeyLock.RUnlock()
+ return
+}
+
+// Hash password using pre 4.1 (old password) method
+// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c
+type myRnd struct {
+ seed1, seed2 uint32
+}
+
+const myRndMaxVal = 0x3FFFFFFF
+
+// Pseudo random number generator
+func newMyRnd(seed1, seed2 uint32) *myRnd {
+ return &myRnd{
+ seed1: seed1 % myRndMaxVal,
+ seed2: seed2 % myRndMaxVal,
+ }
+}
+
+// Tested to be equivalent to MariaDB's floating point variant
+// http://play.golang.org/p/QHvhd4qved
+// http://play.golang.org/p/RG0q4ElWDx
+func (r *myRnd) NextByte() byte {
+ r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal
+ r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal
+
+ return byte(uint64(r.seed1) * 31 / myRndMaxVal)
+}
+
+// Generate binary hash from byte string using insecure pre 4.1 method
+func pwHash(password []byte) (result [2]uint32) {
+ var add uint32 = 7
+ var tmp uint32
+
+ result[0] = 1345345333
+ result[1] = 0x12345671
+
+ for _, c := range password {
+ // skip spaces and tabs in password
+ if c == ' ' || c == '\t' {
+ continue
+ }
+
+ tmp = uint32(c)
+ result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8)
+ result[1] += (result[1] << 8) ^ result[0]
+ add += tmp
+ }
+
+ // Remove sign bit ((1<<31)-1)
+ result[0] &= 0x7FFFFFFF
+ result[1] &= 0x7FFFFFFF
+
+ return
+}
+
+// Hash password using insecure pre 4.1 method
+func scrambleOldPassword(scramble []byte, password string) []byte {
+ scramble = scramble[:8]
+
+ hashPw := pwHash([]byte(password))
+ hashSc := pwHash(scramble)
+
+ r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1])
+
+ var out [8]byte
+ for i := range out {
+ out[i] = r.NextByte() + 64
+ }
+
+ mask := r.NextByte()
+ for i := range out {
+ out[i] ^= mask
+ }
+
+ return out[:]
+}
+
+// Hash password using 4.1+ method (SHA1)
+func scramblePassword(scramble []byte, password string) []byte {
+ if len(password) == 0 {
+ return nil
+ }
+
+ // stage1Hash = SHA1(password)
+ crypt := sha1.New()
+ crypt.Write([]byte(password))
+ stage1 := crypt.Sum(nil)
+
+ // scrambleHash = SHA1(scramble + SHA1(stage1Hash))
+ // inner Hash
+ crypt.Reset()
+ crypt.Write(stage1)
+ hash := crypt.Sum(nil)
+
+ // outer Hash
+ crypt.Reset()
+ crypt.Write(scramble)
+ crypt.Write(hash)
+ scramble = crypt.Sum(nil)
+
+ // token = scrambleHash XOR stage1Hash
+ for i := range scramble {
+ scramble[i] ^= stage1[i]
+ }
+ return scramble
+}
+
+// Hash password using MySQL 8+ method (SHA256)
+func scrambleSHA256Password(scramble []byte, password string) []byte {
+ if len(password) == 0 {
+ return nil
+ }
+
+ // XOR(SHA256(password), SHA256(SHA256(SHA256(password)), scramble))
+
+ crypt := sha256.New()
+ crypt.Write([]byte(password))
+ message1 := crypt.Sum(nil)
+
+ crypt.Reset()
+ crypt.Write(message1)
+ message1Hash := crypt.Sum(nil)
+
+ crypt.Reset()
+ crypt.Write(message1Hash)
+ crypt.Write(scramble)
+ message2 := crypt.Sum(nil)
+
+ for i := range message1 {
+ message1[i] ^= message2[i]
+ }
+
+ return message1
+}
+
+func encryptPassword(password string, seed []byte, pub *rsa.PublicKey) ([]byte, error) {
+ plain := make([]byte, len(password)+1)
+ copy(plain, password)
+ for i := range plain {
+ j := i % len(seed)
+ plain[i] ^= seed[j]
+ }
+ sha1 := sha1.New()
+ return rsa.EncryptOAEP(sha1, rand.Reader, pub, plain, nil)
+}
+
+func (mc *mysqlConn) sendEncryptedPassword(seed []byte, pub *rsa.PublicKey) error {
+ enc, err := encryptPassword(mc.cfg.Passwd, seed, pub)
+ if err != nil {
+ return err
+ }
+ return mc.writeAuthSwitchPacket(enc)
+}
+
+func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, error) {
+ switch plugin {
+ case "caching_sha2_password":
+ authResp := scrambleSHA256Password(authData, mc.cfg.Passwd)
+ return authResp, nil
+
+ case "mysql_old_password":
+ if !mc.cfg.AllowOldPasswords {
+ return nil, ErrOldPassword
+ }
+ if len(mc.cfg.Passwd) == 0 {
+ return nil, nil
+ }
+ // Note: there are edge cases where this should work but doesn't;
+ // this is currently "wontfix":
+ // https://github.com/go-sql-driver/mysql/issues/184
+ authResp := append(scrambleOldPassword(authData[:8], mc.cfg.Passwd), 0)
+ return authResp, nil
+
+ case "mysql_clear_password":
+ if !mc.cfg.AllowCleartextPasswords {
+ return nil, ErrCleartextPassword
+ }
+ // http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html
+ // http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html
+ return append([]byte(mc.cfg.Passwd), 0), nil
+
+ case "mysql_native_password":
+ if !mc.cfg.AllowNativePasswords {
+ return nil, ErrNativePassword
+ }
+ // https://dev.mysql.com/doc/internals/en/secure-password-authentication.html
+ // Native password authentication only needs and will need a 20-byte challenge.
+ authResp := scramblePassword(authData[:20], mc.cfg.Passwd)
+ return authResp, nil
+
+ case "sha256_password":
+ if len(mc.cfg.Passwd) == 0 {
+ return []byte{0}, nil
+ }
+ if mc.cfg.tls != nil || mc.cfg.Net == "unix" {
+ // write cleartext auth packet
+ return append([]byte(mc.cfg.Passwd), 0), nil
+ }
+
+ pubKey := mc.cfg.pubKey
+ if pubKey == nil {
+ // request public key from server
+ return []byte{1}, nil
+ }
+
+ // encrypted password
+ enc, err := encryptPassword(mc.cfg.Passwd, authData, pubKey)
+ return enc, err
+
+ default:
+ errLog.Print("unknown auth plugin:", plugin)
+ return nil, ErrUnknownPlugin
+ }
+}
+
+func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
+ // Read Result Packet
+ authData, newPlugin, err := mc.readAuthResult()
+ if err != nil {
+ return err
+ }
+
+ // handle auth plugin switch, if requested
+ if newPlugin != "" {
+ // If CLIENT_PLUGIN_AUTH capability is not supported, no new cipher is
+ // sent and we have to keep using the cipher sent in the init packet.
+ if authData == nil {
+ authData = oldAuthData
+ } else {
+ // copy data from read buffer to owned slice
+ copy(oldAuthData, authData)
+ }
+
+ plugin = newPlugin
+
+ authResp, err := mc.auth(authData, plugin)
+ if err != nil {
+ return err
+ }
+ if err = mc.writeAuthSwitchPacket(authResp); err != nil {
+ return err
+ }
+
+ // Read Result Packet
+ authData, newPlugin, err = mc.readAuthResult()
+ if err != nil {
+ return err
+ }
+
+ // Do not allow the auth plugin to be changed more than once
+ if newPlugin != "" {
+ return ErrMalformPkt
+ }
+ }
+
+ switch plugin {
+
+ // https://insidemysql.com/preparing-your-community-connector-for-mysql-8-part-2-sha256/
+ case "caching_sha2_password":
+ switch len(authData) {
+ case 0:
+ return nil // auth successful
+ case 1:
+ switch authData[0] {
+ case cachingSha2PasswordFastAuthSuccess:
+ if err = mc.readResultOK(); err == nil {
+ return nil // auth successful
+ }
+
+ case cachingSha2PasswordPerformFullAuthentication:
+ if mc.cfg.tls != nil || mc.cfg.Net == "unix" {
+ // write cleartext auth packet
+ err = mc.writeAuthSwitchPacket(append([]byte(mc.cfg.Passwd), 0))
+ if err != nil {
+ return err
+ }
+ } else {
+ pubKey := mc.cfg.pubKey
+ if pubKey == nil {
+ // request public key from server
+ data, err := mc.buf.takeSmallBuffer(4 + 1)
+ if err != nil {
+ return err
+ }
+ data[4] = cachingSha2PasswordRequestPublicKey
+ mc.writePacket(data)
+
+ // parse public key
+ if data, err = mc.readPacket(); err != nil {
+ return err
+ }
+
+ block, rest := pem.Decode(data[1:])
+ if block == nil {
+ return fmt.Errorf("No Pem data found, data: %s", rest)
+ }
+ pkix, err := x509.ParsePKIXPublicKey(block.Bytes)
+ if err != nil {
+ return err
+ }
+ pubKey = pkix.(*rsa.PublicKey)
+ }
+
+ // send encrypted password
+ err = mc.sendEncryptedPassword(oldAuthData, pubKey)
+ if err != nil {
+ return err
+ }
+ }
+ return mc.readResultOK()
+
+ default:
+ return ErrMalformPkt
+ }
+ default:
+ return ErrMalformPkt
+ }
+
+ case "sha256_password":
+ switch len(authData) {
+ case 0:
+ return nil // auth successful
+ default:
+ block, _ := pem.Decode(authData)
+ pub, err := x509.ParsePKIXPublicKey(block.Bytes)
+ if err != nil {
+ return err
+ }
+
+ // send encrypted password
+ err = mc.sendEncryptedPassword(oldAuthData, pub.(*rsa.PublicKey))
+ if err != nil {
+ return err
+ }
+ return mc.readResultOK()
+ }
+
+ default:
+ return nil // auth successful
+ }
+
+ return err
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/buffer.go b/vendor/github.com/go-sql-driver/mysql/buffer.go
new file mode 100644
index 0000000..0774c5c
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/buffer.go
@@ -0,0 +1,182 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "io"
+ "net"
+ "time"
+)
+
+const defaultBufSize = 4096
+const maxCachedBufSize = 256 * 1024
+
+// A buffer which is used for both reading and writing.
+// This is possible since communication on each connection is synchronous.
+// In other words, we can't write and read simultaneously on the same connection.
+// The buffer is similar to bufio.Reader / Writer but zero-copy-ish.
+// It is also highly optimized for this particular use case.
+// This buffer is backed by two byte slices in a double-buffering scheme.
+type buffer struct {
+ buf []byte // buf is a byte buffer whose length and capacity are equal.
+ nc net.Conn
+ idx int
+ length int
+ timeout time.Duration
+ dbuf [2][]byte // dbuf is an array with the two byte slices that back this buffer
+ flipcnt uint // flipcnt is the current buffer counter for double-buffering
+}
+
+// newBuffer allocates and returns a new buffer.
+func newBuffer(nc net.Conn) buffer {
+ fg := make([]byte, defaultBufSize)
+ return buffer{
+ buf: fg,
+ nc: nc,
+ dbuf: [2][]byte{fg, nil},
+ }
+}
+
+// flip replaces the active buffer with the background buffer
+// this is a delayed flip that simply increases the buffer counter;
+// the actual flip will be performed the next time we call `buffer.fill`
+func (b *buffer) flip() {
+ b.flipcnt += 1
+}
+
+// fill reads into the buffer until at least _need_ bytes are in it
+func (b *buffer) fill(need int) error {
+ n := b.length
+ // fill data into its double-buffering target: if we've called
+ // flip on this buffer, we'll be copying to the background buffer,
+ // and then filling it with network data; otherwise we'll just move
+ // the contents of the current buffer to the front before filling it
+ dest := b.dbuf[b.flipcnt&1]
+
+ // grow buffer if necessary to fit the whole packet.
+ if need > len(dest) {
+ // Round up to the next multiple of the default size
+ dest = make([]byte, ((need/defaultBufSize)+1)*defaultBufSize)
+
+ // if the allocated buffer is not too large, move it to backing storage
+ // to prevent extra allocations on applications that perform large reads
+ if len(dest) <= maxCachedBufSize {
+ b.dbuf[b.flipcnt&1] = dest
+ }
+ }
+
+ // if we're filling the fg buffer, move the existing data to the start of it.
+ // if we're filling the bg buffer, copy over the data
+ if n > 0 {
+ copy(dest[:n], b.buf[b.idx:])
+ }
+
+ b.buf = dest
+ b.idx = 0
+
+ for {
+ if b.timeout > 0 {
+ if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil {
+ return err
+ }
+ }
+
+ nn, err := b.nc.Read(b.buf[n:])
+ n += nn
+
+ switch err {
+ case nil:
+ if n < need {
+ continue
+ }
+ b.length = n
+ return nil
+
+ case io.EOF:
+ if n >= need {
+ b.length = n
+ return nil
+ }
+ return io.ErrUnexpectedEOF
+
+ default:
+ return err
+ }
+ }
+}
+
+// readNext returns the next `need` bytes from the buffer.
+// The returned slice is only guaranteed to be valid until the next read.
+func (b *buffer) readNext(need int) ([]byte, error) {
+ if b.length < need {
+ // refill
+ if err := b.fill(need); err != nil {
+ return nil, err
+ }
+ }
+
+ offset := b.idx
+ b.idx += need
+ b.length -= need
+ return b.buf[offset:b.idx], nil
+}
+
+// takeBuffer returns a buffer with the requested size.
+// If possible, a slice from the existing buffer is returned.
+// Otherwise a bigger buffer is made.
+// Only one buffer (total) can be used at a time.
+func (b *buffer) takeBuffer(length int) ([]byte, error) {
+ if b.length > 0 {
+ return nil, ErrBusyBuffer
+ }
+
+ // test (cheap) general case first
+ if length <= cap(b.buf) {
+ return b.buf[:length], nil
+ }
+
+ if length < maxPacketSize {
+ b.buf = make([]byte, length)
+ return b.buf, nil
+ }
+
+ // buffer is larger than we want to store.
+ return make([]byte, length), nil
+}
+
+// takeSmallBuffer is a shortcut which can be used if length is
+// known to be smaller than defaultBufSize.
+// Only one buffer (total) can be used at a time.
+func (b *buffer) takeSmallBuffer(length int) ([]byte, error) {
+ if b.length > 0 {
+ return nil, ErrBusyBuffer
+ }
+ return b.buf[:length], nil
+}
+
+// takeCompleteBuffer returns the complete existing buffer.
+// This can be used if the necessary buffer size is unknown.
+// cap and len of the returned buffer will be equal.
+// Only one buffer (total) can be used at a time.
+func (b *buffer) takeCompleteBuffer() ([]byte, error) {
+ if b.length > 0 {
+ return nil, ErrBusyBuffer
+ }
+ return b.buf, nil
+}
+
+// store stores buf, an updated buffer, if it is suitable to do so.
+func (b *buffer) store(buf []byte) error {
+ if b.length > 0 {
+ return ErrBusyBuffer
+ } else if cap(buf) <= maxPacketSize && cap(buf) > cap(b.buf) {
+ b.buf = buf[:cap(buf)]
+ }
+ return nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go
new file mode 100644
index 0000000..326a9f7
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/collations.go
@@ -0,0 +1,265 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+const defaultCollation = "utf8mb4_general_ci"
+const binaryCollation = "binary"
+
+// A list of available collations mapped to the internal ID.
+// To update this map use the following MySQL query:
+// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS WHERE ID<256 ORDER BY ID
+//
+// The handshake packet has only 1 byte for the collation_id, so we can't use collations with ID > 255.
+//
+// ucs2, utf16, and utf32 can't be used for connection charset.
+// https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset
+// They are commented out to reduce the size of this map.
+var collations = map[string]byte{
+ "big5_chinese_ci": 1,
+ "latin2_czech_cs": 2,
+ "dec8_swedish_ci": 3,
+ "cp850_general_ci": 4,
+ "latin1_german1_ci": 5,
+ "hp8_english_ci": 6,
+ "koi8r_general_ci": 7,
+ "latin1_swedish_ci": 8,
+ "latin2_general_ci": 9,
+ "swe7_swedish_ci": 10,
+ "ascii_general_ci": 11,
+ "ujis_japanese_ci": 12,
+ "sjis_japanese_ci": 13,
+ "cp1251_bulgarian_ci": 14,
+ "latin1_danish_ci": 15,
+ "hebrew_general_ci": 16,
+ "tis620_thai_ci": 18,
+ "euckr_korean_ci": 19,
+ "latin7_estonian_cs": 20,
+ "latin2_hungarian_ci": 21,
+ "koi8u_general_ci": 22,
+ "cp1251_ukrainian_ci": 23,
+ "gb2312_chinese_ci": 24,
+ "greek_general_ci": 25,
+ "cp1250_general_ci": 26,
+ "latin2_croatian_ci": 27,
+ "gbk_chinese_ci": 28,
+ "cp1257_lithuanian_ci": 29,
+ "latin5_turkish_ci": 30,
+ "latin1_german2_ci": 31,
+ "armscii8_general_ci": 32,
+ "utf8_general_ci": 33,
+ "cp1250_czech_cs": 34,
+ //"ucs2_general_ci": 35,
+ "cp866_general_ci": 36,
+ "keybcs2_general_ci": 37,
+ "macce_general_ci": 38,
+ "macroman_general_ci": 39,
+ "cp852_general_ci": 40,
+ "latin7_general_ci": 41,
+ "latin7_general_cs": 42,
+ "macce_bin": 43,
+ "cp1250_croatian_ci": 44,
+ "utf8mb4_general_ci": 45,
+ "utf8mb4_bin": 46,
+ "latin1_bin": 47,
+ "latin1_general_ci": 48,
+ "latin1_general_cs": 49,
+ "cp1251_bin": 50,
+ "cp1251_general_ci": 51,
+ "cp1251_general_cs": 52,
+ "macroman_bin": 53,
+ //"utf16_general_ci": 54,
+ //"utf16_bin": 55,
+ //"utf16le_general_ci": 56,
+ "cp1256_general_ci": 57,
+ "cp1257_bin": 58,
+ "cp1257_general_ci": 59,
+ //"utf32_general_ci": 60,
+ //"utf32_bin": 61,
+ //"utf16le_bin": 62,
+ "binary": 63,
+ "armscii8_bin": 64,
+ "ascii_bin": 65,
+ "cp1250_bin": 66,
+ "cp1256_bin": 67,
+ "cp866_bin": 68,
+ "dec8_bin": 69,
+ "greek_bin": 70,
+ "hebrew_bin": 71,
+ "hp8_bin": 72,
+ "keybcs2_bin": 73,
+ "koi8r_bin": 74,
+ "koi8u_bin": 75,
+ "utf8_tolower_ci": 76,
+ "latin2_bin": 77,
+ "latin5_bin": 78,
+ "latin7_bin": 79,
+ "cp850_bin": 80,
+ "cp852_bin": 81,
+ "swe7_bin": 82,
+ "utf8_bin": 83,
+ "big5_bin": 84,
+ "euckr_bin": 85,
+ "gb2312_bin": 86,
+ "gbk_bin": 87,
+ "sjis_bin": 88,
+ "tis620_bin": 89,
+ //"ucs2_bin": 90,
+ "ujis_bin": 91,
+ "geostd8_general_ci": 92,
+ "geostd8_bin": 93,
+ "latin1_spanish_ci": 94,
+ "cp932_japanese_ci": 95,
+ "cp932_bin": 96,
+ "eucjpms_japanese_ci": 97,
+ "eucjpms_bin": 98,
+ "cp1250_polish_ci": 99,
+ //"utf16_unicode_ci": 101,
+ //"utf16_icelandic_ci": 102,
+ //"utf16_latvian_ci": 103,
+ //"utf16_romanian_ci": 104,
+ //"utf16_slovenian_ci": 105,
+ //"utf16_polish_ci": 106,
+ //"utf16_estonian_ci": 107,
+ //"utf16_spanish_ci": 108,
+ //"utf16_swedish_ci": 109,
+ //"utf16_turkish_ci": 110,
+ //"utf16_czech_ci": 111,
+ //"utf16_danish_ci": 112,
+ //"utf16_lithuanian_ci": 113,
+ //"utf16_slovak_ci": 114,
+ //"utf16_spanish2_ci": 115,
+ //"utf16_roman_ci": 116,
+ //"utf16_persian_ci": 117,
+ //"utf16_esperanto_ci": 118,
+ //"utf16_hungarian_ci": 119,
+ //"utf16_sinhala_ci": 120,
+ //"utf16_german2_ci": 121,
+ //"utf16_croatian_ci": 122,
+ //"utf16_unicode_520_ci": 123,
+ //"utf16_vietnamese_ci": 124,
+ //"ucs2_unicode_ci": 128,
+ //"ucs2_icelandic_ci": 129,
+ //"ucs2_latvian_ci": 130,
+ //"ucs2_romanian_ci": 131,
+ //"ucs2_slovenian_ci": 132,
+ //"ucs2_polish_ci": 133,
+ //"ucs2_estonian_ci": 134,
+ //"ucs2_spanish_ci": 135,
+ //"ucs2_swedish_ci": 136,
+ //"ucs2_turkish_ci": 137,
+ //"ucs2_czech_ci": 138,
+ //"ucs2_danish_ci": 139,
+ //"ucs2_lithuanian_ci": 140,
+ //"ucs2_slovak_ci": 141,
+ //"ucs2_spanish2_ci": 142,
+ //"ucs2_roman_ci": 143,
+ //"ucs2_persian_ci": 144,
+ //"ucs2_esperanto_ci": 145,
+ //"ucs2_hungarian_ci": 146,
+ //"ucs2_sinhala_ci": 147,
+ //"ucs2_german2_ci": 148,
+ //"ucs2_croatian_ci": 149,
+ //"ucs2_unicode_520_ci": 150,
+ //"ucs2_vietnamese_ci": 151,
+ //"ucs2_general_mysql500_ci": 159,
+ //"utf32_unicode_ci": 160,
+ //"utf32_icelandic_ci": 161,
+ //"utf32_latvian_ci": 162,
+ //"utf32_romanian_ci": 163,
+ //"utf32_slovenian_ci": 164,
+ //"utf32_polish_ci": 165,
+ //"utf32_estonian_ci": 166,
+ //"utf32_spanish_ci": 167,
+ //"utf32_swedish_ci": 168,
+ //"utf32_turkish_ci": 169,
+ //"utf32_czech_ci": 170,
+ //"utf32_danish_ci": 171,
+ //"utf32_lithuanian_ci": 172,
+ //"utf32_slovak_ci": 173,
+ //"utf32_spanish2_ci": 174,
+ //"utf32_roman_ci": 175,
+ //"utf32_persian_ci": 176,
+ //"utf32_esperanto_ci": 177,
+ //"utf32_hungarian_ci": 178,
+ //"utf32_sinhala_ci": 179,
+ //"utf32_german2_ci": 180,
+ //"utf32_croatian_ci": 181,
+ //"utf32_unicode_520_ci": 182,
+ //"utf32_vietnamese_ci": 183,
+ "utf8_unicode_ci": 192,
+ "utf8_icelandic_ci": 193,
+ "utf8_latvian_ci": 194,
+ "utf8_romanian_ci": 195,
+ "utf8_slovenian_ci": 196,
+ "utf8_polish_ci": 197,
+ "utf8_estonian_ci": 198,
+ "utf8_spanish_ci": 199,
+ "utf8_swedish_ci": 200,
+ "utf8_turkish_ci": 201,
+ "utf8_czech_ci": 202,
+ "utf8_danish_ci": 203,
+ "utf8_lithuanian_ci": 204,
+ "utf8_slovak_ci": 205,
+ "utf8_spanish2_ci": 206,
+ "utf8_roman_ci": 207,
+ "utf8_persian_ci": 208,
+ "utf8_esperanto_ci": 209,
+ "utf8_hungarian_ci": 210,
+ "utf8_sinhala_ci": 211,
+ "utf8_german2_ci": 212,
+ "utf8_croatian_ci": 213,
+ "utf8_unicode_520_ci": 214,
+ "utf8_vietnamese_ci": 215,
+ "utf8_general_mysql500_ci": 223,
+ "utf8mb4_unicode_ci": 224,
+ "utf8mb4_icelandic_ci": 225,
+ "utf8mb4_latvian_ci": 226,
+ "utf8mb4_romanian_ci": 227,
+ "utf8mb4_slovenian_ci": 228,
+ "utf8mb4_polish_ci": 229,
+ "utf8mb4_estonian_ci": 230,
+ "utf8mb4_spanish_ci": 231,
+ "utf8mb4_swedish_ci": 232,
+ "utf8mb4_turkish_ci": 233,
+ "utf8mb4_czech_ci": 234,
+ "utf8mb4_danish_ci": 235,
+ "utf8mb4_lithuanian_ci": 236,
+ "utf8mb4_slovak_ci": 237,
+ "utf8mb4_spanish2_ci": 238,
+ "utf8mb4_roman_ci": 239,
+ "utf8mb4_persian_ci": 240,
+ "utf8mb4_esperanto_ci": 241,
+ "utf8mb4_hungarian_ci": 242,
+ "utf8mb4_sinhala_ci": 243,
+ "utf8mb4_german2_ci": 244,
+ "utf8mb4_croatian_ci": 245,
+ "utf8mb4_unicode_520_ci": 246,
+ "utf8mb4_vietnamese_ci": 247,
+ "gb18030_chinese_ci": 248,
+ "gb18030_bin": 249,
+ "gb18030_unicode_520_ci": 250,
+ "utf8mb4_0900_ai_ci": 255,
+}
+
+// A denylist of collations for which it is unsafe to interpolate parameters.
+// These multibyte encodings may contain 0x5c (`\`) in their trailing bytes.
+var unsafeCollations = map[string]bool{
+ "big5_chinese_ci": true,
+ "sjis_japanese_ci": true,
+ "gbk_chinese_ci": true,
+ "big5_bin": true,
+ "gb2312_bin": true,
+ "gbk_bin": true,
+ "sjis_bin": true,
+ "cp932_japanese_ci": true,
+ "cp932_bin": true,
+ "gb18030_chinese_ci": true,
+ "gb18030_bin": true,
+ "gb18030_unicode_520_ci": true,
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/conncheck.go b/vendor/github.com/go-sql-driver/mysql/conncheck.go
new file mode 100644
index 0000000..024eb28
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/conncheck.go
@@ -0,0 +1,57 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2019 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build linux darwin dragonfly freebsd netbsd openbsd solaris illumos
+
+package mysql
+
+import (
+ "errors"
+ "io"
+ "net"
+ "syscall"
+)
+
+var errUnexpectedRead = errors.New("unexpected read from socket")
+
+func connCheck(conn net.Conn) error {
+ var sysErr error
+
+ sysConn, ok := conn.(syscall.Conn)
+ if !ok {
+ return nil
+ }
+ rawConn, err := sysConn.SyscallConn()
+ if err != nil {
+ return err
+ }
+
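+ // Perform a non-blocking one-byte read on the raw file descriptor.
+ // A healthy idle connection returns EAGAIN/EWOULDBLOCK; readable data
+ // or EOF means the connection is broken or out of sync.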
+ err = rawConn.Read(func(fd uintptr) bool {
+ var buf [1]byte
+ n, err := syscall.Read(int(fd), buf[:])
+ switch {
+ case n == 0 && err == nil:
+ sysErr = io.EOF
+ case n > 0:
+ sysErr = errUnexpectedRead
+ case err == syscall.EAGAIN || err == syscall.EWOULDBLOCK:
+ sysErr = nil
+ default:
+ sysErr = err
+ }
+ return true
+ })
+ if err != nil {
+ return err
+ }
+
+ return sysErr
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go b/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go
new file mode 100644
index 0000000..ea7fb60
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go
@@ -0,0 +1,19 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2019 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build !linux,!darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!illumos
+
+package mysql
+
+import "net"
+
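+// connCheck is a no-op on platforms where the non-blocking read check
+// (see conncheck.go) is not available.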
+func connCheck(conn net.Conn) error {
+ return nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go
new file mode 100644
index 0000000..835f897
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/connection.go
@@ -0,0 +1,655 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "context"
+ "database/sql"
+ "database/sql/driver"
+ "encoding/json"
+ "io"
+ "net"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type mysqlConn struct {
+ buf buffer
+ netConn net.Conn
+ rawConn net.Conn // underlying connection when netConn is TLS connection.
+ affectedRows uint64
+ insertId uint64
+ cfg *Config
+ maxAllowedPacket int
+ maxWriteSize int
+ writeTimeout time.Duration
+ flags clientFlag
+ status statusFlag
+ sequence uint8
+ parseTime bool
+ reset bool // set when the Go SQL package calls ResetSession
+
+ // for context support (Go 1.8+)
+ watching bool
+ watcher chan<- context.Context
+ closech chan struct{}
+ finished chan<- struct{}
+ canceled atomicError // set non-nil if conn is canceled
+ closed atomicBool // set when conn is closed, before closech is closed
+}
+
+// handleParams handles parameters set in the DSN after the connection is established
+func (mc *mysqlConn) handleParams() (err error) {
+ var cmdSet strings.Builder
+ for param, val := range mc.cfg.Params {
+ switch param {
+ // Charset: character_set_connection, character_set_client, character_set_results
+ case "charset":
+ charsets := strings.Split(val, ",")
+ for i := range charsets {
+ // ignore errors here - a charset may not exist
+ err = mc.exec("SET NAMES " + charsets[i])
+ if err == nil {
+ break
+ }
+ }
+ if err != nil {
+ return
+ }
+
+ // Other system vars accumulated in a single SET command
+ default:
+ if cmdSet.Len() == 0 {
+ // Heuristic: reserve ~30 chars for each additional key=value pair to reduce reallocations
+ cmdSet.Grow(4 + len(param) + 1 + len(val) + 30*(len(mc.cfg.Params)-1))
+ cmdSet.WriteString("SET ")
+ } else {
+ cmdSet.WriteByte(',')
+ }
+ cmdSet.WriteString(param)
+ cmdSet.WriteByte('=')
+ cmdSet.WriteString(val)
+ }
+ }
+
+ if cmdSet.Len() > 0 {
+ err = mc.exec(cmdSet.String())
+ if err != nil {
+ return
+ }
+ }
+
+ return
+}
+
+func (mc *mysqlConn) markBadConn(err error) error {
+ if mc == nil {
+ return err
+ }
+ if err != errBadConnNoWrite {
+ return err
+ }
+ return driver.ErrBadConn
+}
+
+func (mc *mysqlConn) Begin() (driver.Tx, error) {
+ return mc.begin(false)
+}
+
+func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) {
+ if mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ var q string
+ if readOnly {
+ q = "START TRANSACTION READ ONLY"
+ } else {
+ q = "START TRANSACTION"
+ }
+ err := mc.exec(q)
+ if err == nil {
+ return &mysqlTx{mc}, err
+ }
+ return nil, mc.markBadConn(err)
+}
+
+func (mc *mysqlConn) Close() (err error) {
+ // Makes Close idempotent
+ if !mc.closed.IsSet() {
+ err = mc.writeCommandPacket(comQuit)
+ }
+
+ mc.cleanup()
+
+ return
+}
+
+// Closes the network connection and unsets internal variables. Do not call this
+// function after successful authentication; call Close instead. This function
+// is called before auth or on auth failure because MySQL will have already
+// closed the network connection.
+func (mc *mysqlConn) cleanup() {
+ if !mc.closed.TrySet(true) {
+ return
+ }
+
+ // Makes cleanup idempotent
+ close(mc.closech)
+ if mc.netConn == nil {
+ return
+ }
+ if err := mc.netConn.Close(); err != nil {
+ errLog.Print(err)
+ }
+}
+
+func (mc *mysqlConn) error() error {
+ if mc.closed.IsSet() {
+ if err := mc.canceled.Value(); err != nil {
+ return err
+ }
+ return ErrInvalidConn
+ }
+ return nil
+}
+
+func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
+ if mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ // Send command
+ err := mc.writeCommandPacketStr(comStmtPrepare, query)
+ if err != nil {
+ // STMT_PREPARE is safe to retry. So we can return ErrBadConn here.
+ errLog.Print(err)
+ return nil, driver.ErrBadConn
+ }
+
+ stmt := &mysqlStmt{
+ mc: mc,
+ }
+
+ // Read Result
+ columnCount, err := stmt.readPrepareResultPacket()
+ if err == nil {
+ if stmt.paramCount > 0 {
+ if err = mc.readUntilEOF(); err != nil {
+ return nil, err
+ }
+ }
+
+ if columnCount > 0 {
+ err = mc.readUntilEOF()
+ }
+ }
+
+ return stmt, err
+}
+
+func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) {
+ // The number of ? placeholders must equal len(args)
+ if strings.Count(query, "?") != len(args) {
+ return "", driver.ErrSkip
+ }
+
+ buf, err := mc.buf.takeCompleteBuffer()
+ if err != nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(err)
+ return "", ErrInvalidConn
+ }
+ buf = buf[:0]
+ argPos := 0
+
+ for i := 0; i < len(query); i++ {
+ q := strings.IndexByte(query[i:], '?')
+ if q == -1 {
+ buf = append(buf, query[i:]...)
+ break
+ }
+ buf = append(buf, query[i:i+q]...)
+ i += q
+
+ arg := args[argPos]
+ argPos++
+
+ if arg == nil {
+ buf = append(buf, "NULL"...)
+ continue
+ }
+
+ switch v := arg.(type) {
+ case int64:
+ buf = strconv.AppendInt(buf, v, 10)
+ case uint64:
+ // Handle uint64 explicitly because our custom ConvertValue emits unsigned values
+ buf = strconv.AppendUint(buf, v, 10)
+ case float64:
+ buf = strconv.AppendFloat(buf, v, 'g', -1, 64)
+ case bool:
+ if v {
+ buf = append(buf, '1')
+ } else {
+ buf = append(buf, '0')
+ }
+ case time.Time:
+ if v.IsZero() {
+ buf = append(buf, "'0000-00-00'"...)
+ } else {
+ buf = append(buf, '\'')
+ buf, err = appendDateTime(buf, v.In(mc.cfg.Loc))
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, '\'')
+ }
+ case json.RawMessage:
+ buf = append(buf, '\'')
+ if mc.status&statusNoBackslashEscapes == 0 {
+ buf = escapeBytesBackslash(buf, v)
+ } else {
+ buf = escapeBytesQuotes(buf, v)
+ }
+ buf = append(buf, '\'')
+ case []byte:
+ if v == nil {
+ buf = append(buf, "NULL"...)
+ } else {
+ buf = append(buf, "_binary'"...)
+ if mc.status&statusNoBackslashEscapes == 0 {
+ buf = escapeBytesBackslash(buf, v)
+ } else {
+ buf = escapeBytesQuotes(buf, v)
+ }
+ buf = append(buf, '\'')
+ }
+ case string:
+ buf = append(buf, '\'')
+ if mc.status&statusNoBackslashEscapes == 0 {
+ buf = escapeStringBackslash(buf, v)
+ } else {
+ buf = escapeStringQuotes(buf, v)
+ }
+ buf = append(buf, '\'')
+ default:
+ return "", driver.ErrSkip
+ }
+
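+ // The +4 accounts for the 4-byte packet header that will be
+ // prepended when the query is sent.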
+ if len(buf)+4 > mc.maxAllowedPacket {
+ return "", driver.ErrSkip
+ }
+ }
+ if argPos != len(args) {
+ return "", driver.ErrSkip
+ }
+ return string(buf), nil
+}
+
+func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
+ if mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ if len(args) != 0 {
+ if !mc.cfg.InterpolateParams {
+ return nil, driver.ErrSkip
+ }
+ // try to interpolate the parameters to save extra roundtrips for preparing and closing a statement
+ prepared, err := mc.interpolateParams(query, args)
+ if err != nil {
+ return nil, err
+ }
+ query = prepared
+ }
+ mc.affectedRows = 0
+ mc.insertId = 0
+
+ err := mc.exec(query)
+ if err == nil {
+ return &mysqlResult{
+ affectedRows: int64(mc.affectedRows),
+ insertId: int64(mc.insertId),
+ }, err
+ }
+ return nil, mc.markBadConn(err)
+}
+
+// Internal function to execute commands
+func (mc *mysqlConn) exec(query string) error {
+ // Send command
+ if err := mc.writeCommandPacketStr(comQuery, query); err != nil {
+ return mc.markBadConn(err)
+ }
+
+ // Read Result
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err != nil {
+ return err
+ }
+
+ if resLen > 0 {
+ // columns
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
+
+ // rows
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
+ }
+
+ return mc.discardResults()
+}
+
+func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {
+ return mc.query(query, args)
+}
+
+func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) {
+ if mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ if len(args) != 0 {
+ if !mc.cfg.InterpolateParams {
+ return nil, driver.ErrSkip
+ }
+ // try client-side prepare to reduce roundtrips
+ prepared, err := mc.interpolateParams(query, args)
+ if err != nil {
+ return nil, err
+ }
+ query = prepared
+ }
+ // Send command
+ err := mc.writeCommandPacketStr(comQuery, query)
+ if err == nil {
+ // Read Result
+ var resLen int
+ resLen, err = mc.readResultSetHeaderPacket()
+ if err == nil {
+ rows := new(textRows)
+ rows.mc = mc
+
+ if resLen == 0 {
+ rows.rs.done = true
+
+ switch err := rows.NextResultSet(); err {
+ case nil, io.EOF:
+ return rows, nil
+ default:
+ return nil, err
+ }
+ }
+
+ // Columns
+ rows.rs.columns, err = mc.readColumns(resLen)
+ return rows, err
+ }
+ }
+ return nil, mc.markBadConn(err)
+}
+
+// Gets the value of the given MySQL System Variable.
+// The returned byte slice is only valid until the next read.
+func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
+ // Send command
+ if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil {
+ return nil, err
+ }
+
+ // Read Result
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err == nil {
+ rows := new(textRows)
+ rows.mc = mc
+ rows.rs.columns = []mysqlField{{fieldType: fieldTypeVarChar}}
+
+ if resLen > 0 {
+ // Columns
+ if err := mc.readUntilEOF(); err != nil {
+ return nil, err
+ }
+ }
+
+ dest := make([]driver.Value, resLen)
+ if err = rows.readRow(dest); err == nil {
+ return dest[0].([]byte), mc.readUntilEOF()
+ }
+ }
+ return nil, err
+}
+
+// cancel is called when the query is canceled.
+func (mc *mysqlConn) cancel(err error) {
+ mc.canceled.Set(err)
+ mc.cleanup()
+}
+
+// finish is called when the query has succeeded.
+func (mc *mysqlConn) finish() {
+ if !mc.watching || mc.finished == nil {
+ return
+ }
+ select {
+ case mc.finished <- struct{}{}:
+ mc.watching = false
+ case <-mc.closech:
+ }
+}
+
+// Ping implements driver.Pinger interface
+func (mc *mysqlConn) Ping(ctx context.Context) (err error) {
+ if mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return driver.ErrBadConn
+ }
+
+ if err = mc.watchCancel(ctx); err != nil {
+ return
+ }
+ defer mc.finish()
+
+ if err = mc.writeCommandPacket(comPing); err != nil {
+ return mc.markBadConn(err)
+ }
+
+ return mc.readResultOK()
+}
+
+// BeginTx implements driver.ConnBeginTx interface
+func (mc *mysqlConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
+ if mc.closed.IsSet() {
+ return nil, driver.ErrBadConn
+ }
+
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer mc.finish()
+
+ if sql.IsolationLevel(opts.Isolation) != sql.LevelDefault {
+ level, err := mapIsolationLevel(opts.Isolation)
+ if err != nil {
+ return nil, err
+ }
+ err = mc.exec("SET TRANSACTION ISOLATION LEVEL " + level)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return mc.begin(opts.ReadOnly)
+}
+
+func (mc *mysqlConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ rows, err := mc.query(query, dargs)
+ if err != nil {
+ mc.finish()
+ return nil, err
+ }
+ rows.finish = mc.finish
+ return rows, err
+}
+
+func (mc *mysqlConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer mc.finish()
+
+ return mc.Exec(query, dargs)
+}
+
+func (mc *mysqlConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ stmt, err := mc.Prepare(query)
+ mc.finish()
+ if err != nil {
+ return nil, err
+ }
+
+ select {
+ default:
+ case <-ctx.Done():
+ stmt.Close()
+ return nil, ctx.Err()
+ }
+ return stmt, nil
+}
+
+func (stmt *mysqlStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := stmt.mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ rows, err := stmt.query(dargs)
+ if err != nil {
+ stmt.mc.finish()
+ return nil, err
+ }
+ rows.finish = stmt.mc.finish
+ return rows, err
+}
+
+func (stmt *mysqlStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := stmt.mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer stmt.mc.finish()
+
+ return stmt.Exec(dargs)
+}
+
+func (mc *mysqlConn) watchCancel(ctx context.Context) error {
+ if mc.watching {
+ // We can only reach here if the previous watch was canceled,
+ // so the connection is already invalid
+ mc.cleanup()
+ return nil
+ }
+ // When ctx is already cancelled, don't watch it.
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+ // When ctx is not cancellable, don't watch it.
+ if ctx.Done() == nil {
+ return nil
+ }
+ // When watcher is not alive, can't watch it.
+ if mc.watcher == nil {
+ return nil
+ }
+
+ mc.watching = true
+ mc.watcher <- ctx
+ return nil
+}
+
+func (mc *mysqlConn) startWatcher() {
+ watcher := make(chan context.Context, 1)
+ mc.watcher = watcher
+ finished := make(chan struct{})
+ mc.finished = finished
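+ // The watcher goroutine receives a context from watchCancel and then races
+ // ctx.Done() against query completion (finished) and connection close
+ // (closech), canceling the connection if the context is done first.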
+ go func() {
+ for {
+ var ctx context.Context
+ select {
+ case ctx = <-watcher:
+ case <-mc.closech:
+ return
+ }
+
+ select {
+ case <-ctx.Done():
+ mc.cancel(ctx.Err())
+ case <-finished:
+ case <-mc.closech:
+ return
+ }
+ }
+ }()
+}
+
+func (mc *mysqlConn) CheckNamedValue(nv *driver.NamedValue) (err error) {
+ nv.Value, err = converter{}.ConvertValue(nv.Value)
+ return
+}
+
+// ResetSession implements driver.SessionResetter.
+// (From Go 1.10)
+func (mc *mysqlConn) ResetSession(ctx context.Context) error {
+ if mc.closed.IsSet() {
+ return driver.ErrBadConn
+ }
+ mc.reset = true
+ return nil
+}
+
+// IsValid implements driver.Validator interface
+// (From Go 1.15)
+func (mc *mysqlConn) IsValid() bool {
+ return !mc.closed.IsSet()
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/connector.go b/vendor/github.com/go-sql-driver/mysql/connector.go
new file mode 100644
index 0000000..d567b4e
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/connector.go
@@ -0,0 +1,146 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "context"
+ "database/sql/driver"
+ "net"
+)
+
+type connector struct {
+ cfg *Config // immutable private copy.
+}
+
+// Connect implements driver.Connector interface.
+// Connect returns a connection to the database.
+func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
+ var err error
+
+ // New mysqlConn
+ mc := &mysqlConn{
+ maxAllowedPacket: maxPacketSize,
+ maxWriteSize: maxPacketSize - 1,
+ closech: make(chan struct{}),
+ cfg: c.cfg,
+ }
+ mc.parseTime = mc.cfg.ParseTime
+
+ // Connect to Server
+ dialsLock.RLock()
+ dial, ok := dials[mc.cfg.Net]
+ dialsLock.RUnlock()
+ if ok {
+ dctx := ctx
+ if mc.cfg.Timeout > 0 {
+ var cancel context.CancelFunc
+ dctx, cancel = context.WithTimeout(ctx, c.cfg.Timeout)
+ defer cancel()
+ }
+ mc.netConn, err = dial(dctx, mc.cfg.Addr)
+ } else {
+ nd := net.Dialer{Timeout: mc.cfg.Timeout}
+ mc.netConn, err = nd.DialContext(ctx, mc.cfg.Net, mc.cfg.Addr)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ // Enable TCP Keepalives on TCP connections
+ if tc, ok := mc.netConn.(*net.TCPConn); ok {
+ if err := tc.SetKeepAlive(true); err != nil {
+ // Don't send COM_QUIT before handshake.
+ mc.netConn.Close()
+ mc.netConn = nil
+ return nil, err
+ }
+ }
+
+ // Call startWatcher for context support (From Go 1.8)
+ mc.startWatcher()
+ if err := mc.watchCancel(ctx); err != nil {
+ mc.cleanup()
+ return nil, err
+ }
+ defer mc.finish()
+
+ mc.buf = newBuffer(mc.netConn)
+
+ // Set I/O timeouts
+ mc.buf.timeout = mc.cfg.ReadTimeout
+ mc.writeTimeout = mc.cfg.WriteTimeout
+
+ // Reading Handshake Initialization Packet
+ authData, plugin, err := mc.readHandshakePacket()
+ if err != nil {
+ mc.cleanup()
+ return nil, err
+ }
+
+ if plugin == "" {
+ plugin = defaultAuthPlugin
+ }
+
+ // Send Client Authentication Packet
+ authResp, err := mc.auth(authData, plugin)
+ if err != nil {
+ // try the default auth plugin if using the requested plugin failed
+ errLog.Print("could not use requested auth plugin '"+plugin+"': ", err.Error())
+ plugin = defaultAuthPlugin
+ authResp, err = mc.auth(authData, plugin)
+ if err != nil {
+ mc.cleanup()
+ return nil, err
+ }
+ }
+ if err = mc.writeHandshakeResponsePacket(authResp, plugin); err != nil {
+ mc.cleanup()
+ return nil, err
+ }
+
+ // Handle response to auth packet, switch methods if possible
+ if err = mc.handleAuthResult(authData, plugin); err != nil {
+ // Authentication failed and MySQL has already closed the connection
+ // (https://dev.mysql.com/doc/internals/en/authentication-fails.html).
+ // Do not send COM_QUIT, just cleanup and return the error.
+ mc.cleanup()
+ return nil, err
+ }
+
+ if mc.cfg.MaxAllowedPacket > 0 {
+ mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket
+ } else {
+ // Get max allowed packet size
+ maxap, err := mc.getSystemVar("max_allowed_packet")
+ if err != nil {
+ mc.Close()
+ return nil, err
+ }
+ mc.maxAllowedPacket = stringToInt(maxap) - 1
+ }
+ if mc.maxAllowedPacket < maxPacketSize {
+ mc.maxWriteSize = mc.maxAllowedPacket
+ }
+
+ // Handle DSN Params
+ err = mc.handleParams()
+ if err != nil {
+ mc.Close()
+ return nil, err
+ }
+
+ return mc, nil
+}
+
+// Driver implements driver.Connector interface.
+// Driver returns &MySQLDriver{}.
+func (c *connector) Driver() driver.Driver {
+ return &MySQLDriver{}
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go
new file mode 100644
index 0000000..b1e6b85
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/const.go
@@ -0,0 +1,175 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+const (
+ defaultAuthPlugin = "mysql_native_password"
+ defaultMaxAllowedPacket = 4 << 20 // 4 MiB
+ minProtocolVersion = 10
+ maxPacketSize = 1<<24 - 1 // max payload length of a single MySQL protocol packet
+ timeFormat = "2006-01-02 15:04:05.999999"
+)
+
+// MySQL constants documentation:
+// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
+
+const (
+ iOK byte = 0x00
+ iAuthMoreData byte = 0x01
+ iLocalInFile byte = 0xfb
+ iEOF byte = 0xfe
+ iERR byte = 0xff
+)
+
+// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags
+type clientFlag uint32
+
+const (
+ clientLongPassword clientFlag = 1 << iota
+ clientFoundRows
+ clientLongFlag
+ clientConnectWithDB
+ clientNoSchema
+ clientCompress
+ clientODBC
+ clientLocalFiles
+ clientIgnoreSpace
+ clientProtocol41
+ clientInteractive
+ clientSSL
+ clientIgnoreSIGPIPE
+ clientTransactions
+ clientReserved
+ clientSecureConn
+ clientMultiStatements
+ clientMultiResults
+ clientPSMultiResults
+ clientPluginAuth
+ clientConnectAttrs
+ clientPluginAuthLenEncClientData
+ clientCanHandleExpiredPasswords
+ clientSessionTrack
+ clientDeprecateEOF
+)
+
+const (
+ comQuit byte = iota + 1
+ comInitDB
+ comQuery
+ comFieldList
+ comCreateDB
+ comDropDB
+ comRefresh
+ comShutdown
+ comStatistics
+ comProcessInfo
+ comConnect
+ comProcessKill
+ comDebug
+ comPing
+ comTime
+ comDelayedInsert
+ comChangeUser
+ comBinlogDump
+ comTableDump
+ comConnectOut
+ comRegisterSlave
+ comStmtPrepare
+ comStmtExecute
+ comStmtSendLongData
+ comStmtClose
+ comStmtReset
+ comSetOption
+ comStmtFetch
+)
+
+// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType
+type fieldType byte
+
+const (
+ fieldTypeDecimal fieldType = iota
+ fieldTypeTiny
+ fieldTypeShort
+ fieldTypeLong
+ fieldTypeFloat
+ fieldTypeDouble
+ fieldTypeNULL
+ fieldTypeTimestamp
+ fieldTypeLongLong
+ fieldTypeInt24
+ fieldTypeDate
+ fieldTypeTime
+ fieldTypeDateTime
+ fieldTypeYear
+ fieldTypeNewDate
+ fieldTypeVarChar
+ fieldTypeBit
+)
+const (
+ fieldTypeJSON fieldType = iota + 0xf5
+ fieldTypeNewDecimal
+ fieldTypeEnum
+ fieldTypeSet
+ fieldTypeTinyBLOB
+ fieldTypeMediumBLOB
+ fieldTypeLongBLOB
+ fieldTypeBLOB
+ fieldTypeVarString
+ fieldTypeString
+ fieldTypeGeometry
+)
+
+type fieldFlag uint16
+
+const (
+ flagNotNULL fieldFlag = 1 << iota
+ flagPriKey
+ flagUniqueKey
+ flagMultipleKey
+ flagBLOB
+ flagUnsigned
+ flagZeroFill
+ flagBinary
+ flagEnum
+ flagAutoIncrement
+ flagTimestamp
+ flagSet
+ flagUnknown1
+ flagUnknown2
+ flagUnknown3
+ flagUnknown4
+)
+
+// http://dev.mysql.com/doc/internals/en/status-flags.html
+type statusFlag uint16
+
+const (
+ statusInTrans statusFlag = 1 << iota
+ statusInAutocommit
+ statusReserved // Not in documentation
+ statusMoreResultsExists
+ statusNoGoodIndexUsed
+ statusNoIndexUsed
+ statusCursorExists
+ statusLastRowSent
+ statusDbDropped
+ statusNoBackslashEscapes
+ statusMetadataChanged
+ statusQueryWasSlow
+ statusPsOutParams
+ statusInTransReadonly
+ statusSessionStateChanged
+)
+
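+// Status bytes sent by the server during caching_sha2_password authentication.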
+const (
+ cachingSha2PasswordRequestPublicKey = 2
+ cachingSha2PasswordFastAuthSuccess = 3
+ cachingSha2PasswordPerformFullAuthentication = 4
+)
diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go
new file mode 100644
index 0000000..c1bdf11
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/driver.go
@@ -0,0 +1,114 @@
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// Package mysql provides a MySQL driver for Go's database/sql package.
+//
+// The driver should be used via the database/sql package:
+//
+// import "database/sql"
+// import _ "github.com/go-sql-driver/mysql"
+//
+// db, err := sql.Open("mysql", "user:password@/dbname")
+//
+// See https://github.com/go-sql-driver/mysql#usage for details
+package mysql
+
+import (
+ "context"
+ "database/sql"
+ "database/sql/driver"
+ "net"
+ "sync"
+)
+
+// MySQLDriver is exported to make the driver directly accessible.
+// In general the driver is used via the database/sql package.
+type MySQLDriver struct{}
+
+// DialFunc is a function which can be used to establish the network connection.
+// Custom dial functions must be registered with RegisterDial
+//
+// Deprecated: users should register a DialContextFunc instead
+type DialFunc func(addr string) (net.Conn, error)
+
+// DialContextFunc is a function which can be used to establish the network connection.
+// Custom dial functions must be registered with RegisterDialContext
+type DialContextFunc func(ctx context.Context, addr string) (net.Conn, error)
+
+var (
+ dialsLock sync.RWMutex
+ dials map[string]DialContextFunc
+)
+
+// RegisterDialContext registers a custom dial function. It can then be used by the
+// network address mynet(addr), where mynet is the registered new network.
+// The current context for the connection and its address is passed to the dial function.
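+//
+// A minimal usage sketch (the "mynet" network name is illustrative):
+//
+//	RegisterDialContext("mynet", func(ctx context.Context, addr string) (net.Conn, error) {
+//		var d net.Dialer
+//		return d.DialContext(ctx, "tcp", addr)
+//	})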
+func RegisterDialContext(net string, dial DialContextFunc) {
+ dialsLock.Lock()
+ defer dialsLock.Unlock()
+ if dials == nil {
+ dials = make(map[string]DialContextFunc)
+ }
+ dials[net] = dial
+}
+
+// RegisterDial registers a custom dial function. It can then be used by the
+// network address mynet(addr), where mynet is the registered new network.
+// addr is passed as a parameter to the dial function.
+//
+// Deprecated: users should call RegisterDialContext instead
+func RegisterDial(network string, dial DialFunc) {
+ RegisterDialContext(network, func(_ context.Context, addr string) (net.Conn, error) {
+ return dial(addr)
+ })
+}
+
+// Open opens a new connection.
+// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how
+// the DSN string is formatted.
+func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
+ cfg, err := ParseDSN(dsn)
+ if err != nil {
+ return nil, err
+ }
+ c := &connector{
+ cfg: cfg,
+ }
+ return c.Connect(context.Background())
+}
+
+func init() {
+ sql.Register("mysql", &MySQLDriver{})
+}
+
+// NewConnector returns a new driver.Connector.
+func NewConnector(cfg *Config) (driver.Connector, error) {
+ cfg = cfg.Clone()
+ // normalize the contents of cfg so calls to NewConnector have the same
+ // behavior as MySQLDriver.OpenConnector
+ if err := cfg.normalize(); err != nil {
+ return nil, err
+ }
+ return &connector{cfg: cfg}, nil
+}
+
+// OpenConnector implements driver.DriverContext.
+func (d MySQLDriver) OpenConnector(dsn string) (driver.Connector, error) {
+ cfg, err := ParseDSN(dsn)
+ if err != nil {
+ return nil, err
+ }
+ return &connector{
+ cfg: cfg,
+ }, nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go
new file mode 100644
index 0000000..93f3548
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/dsn.go
@@ -0,0 +1,561 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "bytes"
+ "crypto/rsa"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "math/big"
+ "net"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+ errInvalidDSNUnescaped = errors.New("invalid DSN: did you forget to escape a param value?")
+ errInvalidDSNAddr = errors.New("invalid DSN: network address not terminated (missing closing brace)")
+ errInvalidDSNNoSlash = errors.New("invalid DSN: missing the slash separating the database name")
+ errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations")
+)
+
+// Config is a configuration parsed from a DSN string.
+// If a new Config is created instead of being parsed from a DSN string,
+// the NewConfig function should be used, which sets default values.
+type Config struct {
+ User string // Username
+ Passwd string // Password (requires User)
+ Net string // Network type
+ Addr string // Network address (requires Net)
+ DBName string // Database name
+ Params map[string]string // Connection parameters
+ Collation string // Connection collation
+ Loc *time.Location // Location for time.Time values
+ MaxAllowedPacket int // Max packet size allowed
+ ServerPubKey string // Server public key name
+ pubKey *rsa.PublicKey // Server public key
+ TLSConfig string // TLS configuration name
+ tls *tls.Config // TLS configuration
+ Timeout time.Duration // Dial timeout
+ ReadTimeout time.Duration // I/O read timeout
+ WriteTimeout time.Duration // I/O write timeout
+
+ AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE
+ AllowCleartextPasswords bool // Allows the cleartext client side plugin
+ AllowNativePasswords bool // Allows the native password authentication method
+ AllowOldPasswords bool // Allows the old insecure password method
+ CheckConnLiveness bool // Check connections for liveness before using them
+ ClientFoundRows bool // Return number of matching rows instead of rows changed
+ ColumnsWithAlias bool // Prepend table alias to column names
+ InterpolateParams bool // Interpolate placeholders into query string
+ MultiStatements bool // Allow multiple statements in one query
+ ParseTime bool // Parse time values to time.Time
+ RejectReadOnly bool // Reject read-only connections
+}
+
+// NewConfig creates a new Config and sets default values.
+func NewConfig() *Config {
+ return &Config{
+ Collation: defaultCollation,
+ Loc: time.UTC,
+ MaxAllowedPacket: defaultMaxAllowedPacket,
+ AllowNativePasswords: true,
+ CheckConnLiveness: true,
+ }
+}
+
+func (cfg *Config) Clone() *Config {
+ cp := *cfg
+ if cp.tls != nil {
+ cp.tls = cfg.tls.Clone()
+ }
+ if len(cp.Params) > 0 {
+ cp.Params = make(map[string]string, len(cfg.Params))
+ for k, v := range cfg.Params {
+ cp.Params[k] = v
+ }
+ }
+ if cfg.pubKey != nil {
+ cp.pubKey = &rsa.PublicKey{
+ N: new(big.Int).Set(cfg.pubKey.N),
+ E: cfg.pubKey.E,
+ }
+ }
+ return &cp
+}
+
+func (cfg *Config) normalize() error {
+ if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
+ return errInvalidDSNUnsafeCollation
+ }
+
+ // Set default network if empty
+ if cfg.Net == "" {
+ cfg.Net = "tcp"
+ }
+
+ // Set default address if empty
+ if cfg.Addr == "" {
+ switch cfg.Net {
+ case "tcp":
+ cfg.Addr = "127.0.0.1:3306"
+ case "unix":
+ cfg.Addr = "/tmp/mysql.sock"
+ default:
+ return errors.New("default addr for network '" + cfg.Net + "' unknown")
+ }
+ } else if cfg.Net == "tcp" {
+ cfg.Addr = ensureHavePort(cfg.Addr)
+ }
+
+ switch cfg.TLSConfig {
+ case "false", "":
+ // don't set anything
+ case "true":
+ cfg.tls = &tls.Config{}
+ case "skip-verify", "preferred":
+ cfg.tls = &tls.Config{InsecureSkipVerify: true}
+ default:
+ cfg.tls = getTLSConfigClone(cfg.TLSConfig)
+ if cfg.tls == nil {
+ return errors.New("invalid value / unknown config name: " + cfg.TLSConfig)
+ }
+ }
+
+ if cfg.tls != nil && cfg.tls.ServerName == "" && !cfg.tls.InsecureSkipVerify {
+ host, _, err := net.SplitHostPort(cfg.Addr)
+ if err == nil {
+ cfg.tls.ServerName = host
+ }
+ }
+
+ if cfg.ServerPubKey != "" {
+ cfg.pubKey = getServerPubKey(cfg.ServerPubKey)
+ if cfg.pubKey == nil {
+ return errors.New("invalid value / unknown server pub key name: " + cfg.ServerPubKey)
+ }
+ }
+
+ return nil
+}
+
+func writeDSNParam(buf *bytes.Buffer, hasParam *bool, name, value string) {
+ buf.Grow(1 + len(name) + 1 + len(value))
+ if !*hasParam {
+ *hasParam = true
+ buf.WriteByte('?')
+ } else {
+ buf.WriteByte('&')
+ }
+ buf.WriteString(name)
+ buf.WriteByte('=')
+ buf.WriteString(value)
+}
+
+// FormatDSN formats the given Config into a DSN string which can be passed to
+// the driver.
+func (cfg *Config) FormatDSN() string {
+ var buf bytes.Buffer
+
+ // [username[:password]@]
+ if len(cfg.User) > 0 {
+ buf.WriteString(cfg.User)
+ if len(cfg.Passwd) > 0 {
+ buf.WriteByte(':')
+ buf.WriteString(cfg.Passwd)
+ }
+ buf.WriteByte('@')
+ }
+
+ // [protocol[(address)]]
+ if len(cfg.Net) > 0 {
+ buf.WriteString(cfg.Net)
+ if len(cfg.Addr) > 0 {
+ buf.WriteByte('(')
+ buf.WriteString(cfg.Addr)
+ buf.WriteByte(')')
+ }
+ }
+
+ // /dbname
+ buf.WriteByte('/')
+ buf.WriteString(cfg.DBName)
+
+ // [?param1=value1&...¶mN=valueN]
+ hasParam := false
+
+ if cfg.AllowAllFiles {
+ hasParam = true
+ buf.WriteString("?allowAllFiles=true")
+ }
+
+ if cfg.AllowCleartextPasswords {
+ writeDSNParam(&buf, &hasParam, "allowCleartextPasswords", "true")
+ }
+
+ if !cfg.AllowNativePasswords {
+ writeDSNParam(&buf, &hasParam, "allowNativePasswords", "false")
+ }
+
+ if cfg.AllowOldPasswords {
+ writeDSNParam(&buf, &hasParam, "allowOldPasswords", "true")
+ }
+
+ if !cfg.CheckConnLiveness {
+ writeDSNParam(&buf, &hasParam, "checkConnLiveness", "false")
+ }
+
+ if cfg.ClientFoundRows {
+ writeDSNParam(&buf, &hasParam, "clientFoundRows", "true")
+ }
+
+ if col := cfg.Collation; col != defaultCollation && len(col) > 0 {
+ writeDSNParam(&buf, &hasParam, "collation", col)
+ }
+
+ if cfg.ColumnsWithAlias {
+ writeDSNParam(&buf, &hasParam, "columnsWithAlias", "true")
+ }
+
+ if cfg.InterpolateParams {
+ writeDSNParam(&buf, &hasParam, "interpolateParams", "true")
+ }
+
+ if cfg.Loc != time.UTC && cfg.Loc != nil {
+ writeDSNParam(&buf, &hasParam, "loc", url.QueryEscape(cfg.Loc.String()))
+ }
+
+ if cfg.MultiStatements {
+ writeDSNParam(&buf, &hasParam, "multiStatements", "true")
+ }
+
+ if cfg.ParseTime {
+ writeDSNParam(&buf, &hasParam, "parseTime", "true")
+ }
+
+ if cfg.ReadTimeout > 0 {
+ writeDSNParam(&buf, &hasParam, "readTimeout", cfg.ReadTimeout.String())
+ }
+
+ if cfg.RejectReadOnly {
+ writeDSNParam(&buf, &hasParam, "rejectReadOnly", "true")
+ }
+
+ if len(cfg.ServerPubKey) > 0 {
+ writeDSNParam(&buf, &hasParam, "serverPubKey", url.QueryEscape(cfg.ServerPubKey))
+ }
+
+ if cfg.Timeout > 0 {
+ writeDSNParam(&buf, &hasParam, "timeout", cfg.Timeout.String())
+ }
+
+ if len(cfg.TLSConfig) > 0 {
+ writeDSNParam(&buf, &hasParam, "tls", url.QueryEscape(cfg.TLSConfig))
+ }
+
+ if cfg.WriteTimeout > 0 {
+ writeDSNParam(&buf, &hasParam, "writeTimeout", cfg.WriteTimeout.String())
+ }
+
+ if cfg.MaxAllowedPacket != defaultMaxAllowedPacket {
+ writeDSNParam(&buf, &hasParam, "maxAllowedPacket", strconv.Itoa(cfg.MaxAllowedPacket))
+ }
+
+ // other params
+ if cfg.Params != nil {
+ var params []string
+ for param := range cfg.Params {
+ params = append(params, param)
+ }
+ sort.Strings(params)
+ for _, param := range params {
+ writeDSNParam(&buf, &hasParam, param, url.QueryEscape(cfg.Params[param]))
+ }
+ }
+
+ return buf.String()
+}
+
+// ParseDSN parses the DSN string to a Config
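+// The DSN has the form [username[:password]@][protocol[(address)]]/dbname[?param1=value1&...&paramN=valueN],
+// e.g. "user:password@tcp(127.0.0.1:3306)/dbname?parseTime=true".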
+func ParseDSN(dsn string) (cfg *Config, err error) {
+ // New config with some default values
+ cfg = NewConfig()
+
+ // [user[:password]@][net[(addr)]]/dbname[?param1=value1¶mN=valueN]
+ // Find the last '/' (since the password or the net addr might contain a '/')
+ foundSlash := false
+ for i := len(dsn) - 1; i >= 0; i-- {
+ if dsn[i] == '/' {
+ foundSlash = true
+ var j, k int
+
+ // left part is empty if i <= 0
+ if i > 0 {
+ // [username[:password]@][protocol[(address)]]
+ // Find the last '@' in dsn[:i]
+ for j = i; j >= 0; j-- {
+ if dsn[j] == '@' {
+ // username[:password]
+ // Find the first ':' in dsn[:j]
+ for k = 0; k < j; k++ {
+ if dsn[k] == ':' {
+ cfg.Passwd = dsn[k+1 : j]
+ break
+ }
+ }
+ cfg.User = dsn[:k]
+
+ break
+ }
+ }
+
+ // [protocol[(address)]]
+ // Find the first '(' in dsn[j+1:i]
+ for k = j + 1; k < i; k++ {
+ if dsn[k] == '(' {
+ // dsn[i-1] must be == ')' if an address is specified
+ if dsn[i-1] != ')' {
+ if strings.ContainsRune(dsn[k+1:i], ')') {
+ return nil, errInvalidDSNUnescaped
+ }
+ return nil, errInvalidDSNAddr
+ }
+ cfg.Addr = dsn[k+1 : i-1]
+ break
+ }
+ }
+ cfg.Net = dsn[j+1 : k]
+ }
+
+ // dbname[?param1=value1&...¶mN=valueN]
+ // Find the first '?' in dsn[i+1:]
+ for j = i + 1; j < len(dsn); j++ {
+ if dsn[j] == '?' {
+ if err = parseDSNParams(cfg, dsn[j+1:]); err != nil {
+ return
+ }
+ break
+ }
+ }
+ cfg.DBName = dsn[i+1 : j]
+
+ break
+ }
+ }
+
+ if !foundSlash && len(dsn) > 0 {
+ return nil, errInvalidDSNNoSlash
+ }
+
+ if err = cfg.normalize(); err != nil {
+ return nil, err
+ }
+ return
+}
+
+// parseDSNParams parses the DSN "query string"
+// Values must be url.QueryEscape'ed
+func parseDSNParams(cfg *Config, params string) (err error) {
+ for _, v := range strings.Split(params, "&") {
+ param := strings.SplitN(v, "=", 2)
+ if len(param) != 2 {
+ continue
+ }
+
+ // cfg params
+ switch value := param[1]; param[0] {
+ // Disable INFILE allowlist / enable all files
+ case "allowAllFiles":
+ var isBool bool
+ cfg.AllowAllFiles, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Use cleartext authentication mode (MySQL 5.5.10+)
+ case "allowCleartextPasswords":
+ var isBool bool
+ cfg.AllowCleartextPasswords, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Use native password authentication
+ case "allowNativePasswords":
+ var isBool bool
+ cfg.AllowNativePasswords, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Use old authentication mode (pre MySQL 4.1)
+ case "allowOldPasswords":
+ var isBool bool
+ cfg.AllowOldPasswords, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Check connections for Liveness before using them
+ case "checkConnLiveness":
+ var isBool bool
+ cfg.CheckConnLiveness, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Switch "rowsAffected" mode
+ case "clientFoundRows":
+ var isBool bool
+ cfg.ClientFoundRows, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Collation
+ case "collation":
+ cfg.Collation = value
+
+ case "columnsWithAlias":
+ var isBool bool
+ cfg.ColumnsWithAlias, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Compression
+ case "compress":
+ return errors.New("compression not implemented yet")
+
+ // Enable client side placeholder substitution
+ case "interpolateParams":
+ var isBool bool
+ cfg.InterpolateParams, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Time Location
+ case "loc":
+ if value, err = url.QueryUnescape(value); err != nil {
+ return
+ }
+ cfg.Loc, err = time.LoadLocation(value)
+ if err != nil {
+ return
+ }
+
+ // multiple statements in one query
+ case "multiStatements":
+ var isBool bool
+ cfg.MultiStatements, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // time.Time parsing
+ case "parseTime":
+ var isBool bool
+ cfg.ParseTime, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // I/O read Timeout
+ case "readTimeout":
+ cfg.ReadTimeout, err = time.ParseDuration(value)
+ if err != nil {
+ return
+ }
+
+ // Reject read-only connections
+ case "rejectReadOnly":
+ var isBool bool
+ cfg.RejectReadOnly, isBool = readBool(value)
+ if !isBool {
+ return errors.New("invalid bool value: " + value)
+ }
+
+ // Server public key
+ case "serverPubKey":
+ name, err := url.QueryUnescape(value)
+ if err != nil {
+ return fmt.Errorf("invalid value for server pub key name: %v", err)
+ }
+ cfg.ServerPubKey = name
+
+ // Strict mode
+ case "strict":
+ panic("strict mode has been removed. See https://github.com/go-sql-driver/mysql/wiki/strict-mode")
+
+ // Dial Timeout
+ case "timeout":
+ cfg.Timeout, err = time.ParseDuration(value)
+ if err != nil {
+ return
+ }
+
+ // TLS-Encryption
+ case "tls":
+ boolValue, isBool := readBool(value)
+ if isBool {
+ if boolValue {
+ cfg.TLSConfig = "true"
+ } else {
+ cfg.TLSConfig = "false"
+ }
+ } else if vl := strings.ToLower(value); vl == "skip-verify" || vl == "preferred" {
+ cfg.TLSConfig = vl
+ } else {
+ name, err := url.QueryUnescape(value)
+ if err != nil {
+ return fmt.Errorf("invalid value for TLS config name: %v", err)
+ }
+ cfg.TLSConfig = name
+ }
+
+ // I/O write Timeout
+ case "writeTimeout":
+ cfg.WriteTimeout, err = time.ParseDuration(value)
+ if err != nil {
+ return
+ }
+ case "maxAllowedPacket":
+ cfg.MaxAllowedPacket, err = strconv.Atoi(value)
+ if err != nil {
+ return
+ }
+ default:
+ // lazy init
+ if cfg.Params == nil {
+ cfg.Params = make(map[string]string)
+ }
+
+ if cfg.Params[param[0]], err = url.QueryUnescape(value); err != nil {
+ return
+ }
+ }
+ }
+
+ return
+}
+
+func ensureHavePort(addr string) string {
+ if _, _, err := net.SplitHostPort(addr); err != nil {
+ return net.JoinHostPort(addr, "3306")
+ }
+ return addr
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go
new file mode 100644
index 0000000..760782f
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/errors.go
@@ -0,0 +1,65 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "errors"
+ "fmt"
+ "log"
+ "os"
+)
+
+// Various errors the driver might return. Can change between driver versions.
+var (
+ ErrInvalidConn = errors.New("invalid connection")
+ ErrMalformPkt = errors.New("malformed packet")
+ ErrNoTLS = errors.New("TLS requested but server does not support TLS")
+ ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN")
+ ErrNativePassword = errors.New("this user requires mysql native password authentication.")
+ ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords")
+ ErrUnknownPlugin = errors.New("this authentication plugin is not supported")
+ ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+")
+ ErrPktSync = errors.New("commands out of sync. You can't run this command now")
+ ErrPktSyncMul = errors.New("commands out of sync. Did you run multiple statements at once?")
+ ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the 'max_allowed_packet' variable on the server")
+ ErrBusyBuffer = errors.New("busy buffer")
+
+ // errBadConnNoWrite is used for connection errors where nothing was sent to the database yet.
+ // If this happens first in a function starting a database interaction, it should be replaced by driver.ErrBadConn
+ // to trigger a resend.
+ // See https://github.com/go-sql-driver/mysql/pull/302
+ errBadConnNoWrite = errors.New("bad connection")
+)
+
+var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile))
+
+// Logger is used to log critical error messages.
+type Logger interface {
+ Print(v ...interface{})
+}
+
+// SetLogger is used to set the logger for critical errors.
+// The initial logger is os.Stderr.
+func SetLogger(logger Logger) error {
+ if logger == nil {
+ return errors.New("logger is nil")
+ }
+ errLog = logger
+ return nil
+}
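+
+// Illustrative sketch (not part of the driver): any value with a
+// Print(v ...interface{}) method satisfies Logger, e.g. a *log.Logger
+// writing to a hypothetical file:
+//
+// f, _ := os.Create("/tmp/mysql-driver.log")
+// _ = mysql.SetLogger(log.New(f, "[mysql] ", log.Ldate|log.Ltime))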
+
+// MySQLError is an error type which represents a single MySQL error
+type MySQLError struct {
+ Number uint16
+ Message string
+}
+
+func (me *MySQLError) Error() string {
+ return fmt.Sprintf("Error %d: %s", me.Number, me.Message)
+}
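+
+// Illustrative sketch (not part of the driver): callers can branch on the
+// server error number via a type assertion, e.g. 1062 = ER_DUP_ENTRY:
+//
+// if merr, ok := err.(*mysql.MySQLError); ok && merr.Number == 1062 {
+//     // handle duplicate-key violation
+// }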
diff --git a/vendor/github.com/go-sql-driver/mysql/fields.go b/vendor/github.com/go-sql-driver/mysql/fields.go
new file mode 100644
index 0000000..ed6c7a3
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/fields.go
@@ -0,0 +1,194 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql"
+ "reflect"
+)
+
+func (mf *mysqlField) typeDatabaseName() string {
+ switch mf.fieldType {
+ case fieldTypeBit:
+ return "BIT"
+ case fieldTypeBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "TEXT"
+ }
+ return "BLOB"
+ case fieldTypeDate:
+ return "DATE"
+ case fieldTypeDateTime:
+ return "DATETIME"
+ case fieldTypeDecimal:
+ return "DECIMAL"
+ case fieldTypeDouble:
+ return "DOUBLE"
+ case fieldTypeEnum:
+ return "ENUM"
+ case fieldTypeFloat:
+ return "FLOAT"
+ case fieldTypeGeometry:
+ return "GEOMETRY"
+ case fieldTypeInt24:
+ return "MEDIUMINT"
+ case fieldTypeJSON:
+ return "JSON"
+ case fieldTypeLong:
+ return "INT"
+ case fieldTypeLongBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "LONGTEXT"
+ }
+ return "LONGBLOB"
+ case fieldTypeLongLong:
+ return "BIGINT"
+ case fieldTypeMediumBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "MEDIUMTEXT"
+ }
+ return "MEDIUMBLOB"
+ case fieldTypeNewDate:
+ return "DATE"
+ case fieldTypeNewDecimal:
+ return "DECIMAL"
+ case fieldTypeNULL:
+ return "NULL"
+ case fieldTypeSet:
+ return "SET"
+ case fieldTypeShort:
+ return "SMALLINT"
+ case fieldTypeString:
+ if mf.charSet == collations[binaryCollation] {
+ return "BINARY"
+ }
+ return "CHAR"
+ case fieldTypeTime:
+ return "TIME"
+ case fieldTypeTimestamp:
+ return "TIMESTAMP"
+ case fieldTypeTiny:
+ return "TINYINT"
+ case fieldTypeTinyBLOB:
+ if mf.charSet != collations[binaryCollation] {
+ return "TINYTEXT"
+ }
+ return "TINYBLOB"
+ case fieldTypeVarChar:
+ if mf.charSet == collations[binaryCollation] {
+ return "VARBINARY"
+ }
+ return "VARCHAR"
+ case fieldTypeVarString:
+ if mf.charSet == collations[binaryCollation] {
+ return "VARBINARY"
+ }
+ return "VARCHAR"
+ case fieldTypeYear:
+ return "YEAR"
+ default:
+ return ""
+ }
+}
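+
+// Illustrative sketch (not part of the driver): these names surface through
+// database/sql's ColumnType.DatabaseTypeName:
+//
+// cols, _ := rows.ColumnTypes()
+// for _, c := range cols {
+//     fmt.Println(c.Name(), c.DatabaseTypeName()) // e.g. "id BIGINT"
+// }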
+
+var (
+ scanTypeFloat32 = reflect.TypeOf(float32(0))
+ scanTypeFloat64 = reflect.TypeOf(float64(0))
+ scanTypeInt8 = reflect.TypeOf(int8(0))
+ scanTypeInt16 = reflect.TypeOf(int16(0))
+ scanTypeInt32 = reflect.TypeOf(int32(0))
+ scanTypeInt64 = reflect.TypeOf(int64(0))
+ scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{})
+ scanTypeNullInt = reflect.TypeOf(sql.NullInt64{})
+ scanTypeNullTime = reflect.TypeOf(nullTime{})
+ scanTypeUint8 = reflect.TypeOf(uint8(0))
+ scanTypeUint16 = reflect.TypeOf(uint16(0))
+ scanTypeUint32 = reflect.TypeOf(uint32(0))
+ scanTypeUint64 = reflect.TypeOf(uint64(0))
+ scanTypeRawBytes = reflect.TypeOf(sql.RawBytes{})
+ scanTypeUnknown = reflect.TypeOf(new(interface{}))
+)
+
+type mysqlField struct {
+ tableName string
+ name string
+ length uint32
+ flags fieldFlag
+ fieldType fieldType
+ decimals byte
+ charSet uint8
+}
+
+func (mf *mysqlField) scanType() reflect.Type {
+ switch mf.fieldType {
+ case fieldTypeTiny:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint8
+ }
+ return scanTypeInt8
+ }
+ return scanTypeNullInt
+
+ case fieldTypeShort, fieldTypeYear:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint16
+ }
+ return scanTypeInt16
+ }
+ return scanTypeNullInt
+
+ case fieldTypeInt24, fieldTypeLong:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint32
+ }
+ return scanTypeInt32
+ }
+ return scanTypeNullInt
+
+ case fieldTypeLongLong:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint64
+ }
+ return scanTypeInt64
+ }
+ return scanTypeNullInt
+
+ case fieldTypeFloat:
+ if mf.flags&flagNotNULL != 0 {
+ return scanTypeFloat32
+ }
+ return scanTypeNullFloat
+
+ case fieldTypeDouble:
+ if mf.flags&flagNotNULL != 0 {
+ return scanTypeFloat64
+ }
+ return scanTypeNullFloat
+
+ case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
+ fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
+ fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
+ fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON,
+ fieldTypeTime:
+ return scanTypeRawBytes
+
+ case fieldTypeDate, fieldTypeNewDate,
+ fieldTypeTimestamp, fieldTypeDateTime:
+ // NullTime is always returned for more consistent behavior, as it can
+ // handle both parseTime settings regardless of whether the field is nullable.
+ return scanTypeNullTime
+
+ default:
+ return scanTypeUnknown
+ }
+}
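+
+// Illustrative sketch (not part of the driver): ColumnType.ScanType exposes
+// these reflect.Types, so callers can pre-allocate scan destinations:
+//
+// t := cols[0].ScanType()            // e.g. reflect.TypeOf(int64(0))
+// dest := reflect.New(t).Interface() // *int64, ready for rows.Scan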
diff --git a/vendor/github.com/go-sql-driver/mysql/fuzz.go b/vendor/github.com/go-sql-driver/mysql/fuzz.go
new file mode 100644
index 0000000..fa75adf
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/fuzz.go
@@ -0,0 +1,24 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package.
+//
+// Copyright 2020 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build gofuzz
+
+package mysql
+
+import (
+ "database/sql"
+)
+
+func Fuzz(data []byte) int {
+ db, err := sql.Open("mysql", string(data))
+ if err != nil {
+ return 0
+ }
+ db.Close()
+ return 1
+}
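+
+// Hedged note (not part of the driver): this entry point targets
+// dvyukov/go-fuzz; a typical invocation is roughly
+//
+// go-fuzz-build github.com/go-sql-driver/mysql
+// go-fuzz -bin=./mysql-fuzz.zip
+//
+// where returning 1 marks the input as interesting to the fuzzer.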
diff --git a/vendor/github.com/go-sql-driver/mysql/go.mod b/vendor/github.com/go-sql-driver/mysql/go.mod
new file mode 100644
index 0000000..fffbf6a
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/go.mod
@@ -0,0 +1,3 @@
+module github.com/go-sql-driver/mysql
+
+go 1.10
diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go
new file mode 100644
index 0000000..60effdf
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/infile.go
@@ -0,0 +1,182 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "sync"
+)
+
+var (
+ fileRegister map[string]bool
+ fileRegisterLock sync.RWMutex
+ readerRegister map[string]func() io.Reader
+ readerRegisterLock sync.RWMutex
+)
+
+// RegisterLocalFile adds the given file to the file allowlist,
+// so that it can be used by "LOAD DATA LOCAL INFILE <filepath>".
+// Alternatively you can allow the use of all local files with
+// the DSN parameter 'allowAllFiles=true'
+//
+// filePath := "/home/gopher/data.csv"
+// mysql.RegisterLocalFile(filePath)
+// err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo")
+// if err != nil {
+// ...
+//
+func RegisterLocalFile(filePath string) {
+ fileRegisterLock.Lock()
+ // lazy map init
+ if fileRegister == nil {
+ fileRegister = make(map[string]bool)
+ }
+
+ fileRegister[strings.Trim(filePath, `"`)] = true
+ fileRegisterLock.Unlock()
+}
+
+// DeregisterLocalFile removes the given filepath from the allowlist.
+func DeregisterLocalFile(filePath string) {
+ fileRegisterLock.Lock()
+ delete(fileRegister, strings.Trim(filePath, `"`))
+ fileRegisterLock.Unlock()
+}
+
+// RegisterReaderHandler registers a handler function which is used
+// to receive an io.Reader.
+// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::<name>".
+// If the handler returns an io.ReadCloser, Close() is called when the
+// request is finished.
+//
+// mysql.RegisterReaderHandler("data", func() io.Reader {
+// var csvReader io.Reader // Some Reader that returns CSV data
+// ... // Open Reader here
+// return csvReader
+// })
+// err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo")
+// if err != nil {
+// ...
+//
+func RegisterReaderHandler(name string, handler func() io.Reader) {
+ readerRegisterLock.Lock()
+ // lazy map init
+ if readerRegister == nil {
+ readerRegister = make(map[string]func() io.Reader)
+ }
+
+ readerRegister[name] = handler
+ readerRegisterLock.Unlock()
+}
+
+// DeregisterReaderHandler removes the ReaderHandler function with
+// the given name from the registry.
+func DeregisterReaderHandler(name string) {
+ readerRegisterLock.Lock()
+ delete(readerRegister, name)
+ readerRegisterLock.Unlock()
+}
+
+func deferredClose(err *error, closer io.Closer) {
+ closeErr := closer.Close()
+ if *err == nil {
+ *err = closeErr
+ }
+}
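+
+// Illustrative note: deferredClose preserves the first error, so callers
+// write
+//
+// defer deferredClose(&err, file)
+//
+// and a failing Close only surfaces when the surrounding function body
+// succeeded.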
+
+func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
+ var rdr io.Reader
+ var data []byte
+ packetSize := 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP
+ if mc.maxWriteSize < packetSize {
+ packetSize = mc.maxWriteSize
+ }
+
+ if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader
+ // The server might return an absolute path. See issue #355.
+ name = name[idx+8:]
+
+ readerRegisterLock.RLock()
+ handler, inMap := readerRegister[name]
+ readerRegisterLock.RUnlock()
+
+ if inMap {
+ rdr = handler()
+ if rdr != nil {
+ if cl, ok := rdr.(io.Closer); ok {
+ defer deferredClose(&err, cl)
+ }
+ } else {
+ err = fmt.Errorf("Reader '%s' is ", name)
+ }
+ } else {
+ err = fmt.Errorf("Reader '%s' is not registered", name)
+ }
+ } else { // File
+ name = strings.Trim(name, `"`)
+ fileRegisterLock.RLock()
+ fr := fileRegister[name]
+ fileRegisterLock.RUnlock()
+ if mc.cfg.AllowAllFiles || fr {
+ var file *os.File
+ var fi os.FileInfo
+
+ if file, err = os.Open(name); err == nil {
+ defer deferredClose(&err, file)
+
+ // get file size
+ if fi, err = file.Stat(); err == nil {
+ rdr = file
+ if fileSize := int(fi.Size()); fileSize < packetSize {
+ packetSize = fileSize
+ }
+ }
+ }
+ } else {
+ err = fmt.Errorf("local file '%s' is not registered", name)
+ }
+ }
+
+ // send content packets
+ // if packetSize == 0, the Reader contains no data
+ if err == nil && packetSize > 0 {
+ data := make([]byte, 4+packetSize)
+ var n int
+ for err == nil {
+ n, err = rdr.Read(data[4:])
+ if n > 0 {
+ if ioErr := mc.writePacket(data[:4+n]); ioErr != nil {
+ return ioErr
+ }
+ }
+ }
+ if err == io.EOF {
+ err = nil
+ }
+ }
+
+ // send empty packet (termination)
+ if data == nil {
+ data = make([]byte, 4)
+ }
+ if ioErr := mc.writePacket(data[:4]); ioErr != nil {
+ return ioErr
+ }
+
+ // read OK packet
+ if err == nil {
+ return mc.readResultOK()
+ }
+
+ mc.readPacket()
+ return err
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime.go b/vendor/github.com/go-sql-driver/mysql/nulltime.go
new file mode 100644
index 0000000..651723a
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/nulltime.go
@@ -0,0 +1,50 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql/driver"
+ "fmt"
+ "time"
+)
+
+// Scan implements the Scanner interface.
+// The value type must be time.Time or string / []byte (formatted time-string),
+// otherwise Scan fails.
+func (nt *NullTime) Scan(value interface{}) (err error) {
+ if value == nil {
+ nt.Time, nt.Valid = time.Time{}, false
+ return
+ }
+
+ switch v := value.(type) {
+ case time.Time:
+ nt.Time, nt.Valid = v, true
+ return
+ case []byte:
+ nt.Time, err = parseDateTime(v, time.UTC)
+ nt.Valid = (err == nil)
+ return
+ case string:
+ nt.Time, err = parseDateTime([]byte(v), time.UTC)
+ nt.Valid = (err == nil)
+ return
+ }
+
+ nt.Valid = false
+ return fmt.Errorf("Can't convert %T to time.Time", value)
+}
+
+// Value implements the driver Valuer interface.
+func (nt NullTime) Value() (driver.Value, error) {
+ if !nt.Valid {
+ return nil, nil
+ }
+ return nt.Time, nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime_go113.go b/vendor/github.com/go-sql-driver/mysql/nulltime_go113.go
new file mode 100644
index 0000000..453b4b3
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/nulltime_go113.go
@@ -0,0 +1,40 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build go1.13
+
+package mysql
+
+import (
+ "database/sql"
+)
+
+// NullTime represents a time.Time that may be NULL.
+// NullTime implements the Scanner interface so
+// it can be used as a scan destination:
+//
+// var nt NullTime
+// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt)
+// ...
+// if nt.Valid {
+// // use nt.Time
+// } else {
+// // NULL value
+// }
+//
+// This NullTime implementation is not driver-specific
+//
+// Deprecated: NullTime doesn't honor the loc DSN parameter.
+// NullTime.Scan interprets a time as UTC, not the loc DSN parameter.
+// Use sql.NullTime instead.
+type NullTime sql.NullTime
+
+// for internal use.
+// the mysql package uses sql.NullTime if it is available.
+// if not, the package uses mysql.NullTime.
+type nullTime = sql.NullTime // sql.NullTime is available
diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go b/vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go
new file mode 100644
index 0000000..9f7ae27
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go
@@ -0,0 +1,39 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build !go1.13
+
+package mysql
+
+import (
+ "time"
+)
+
+// NullTime represents a time.Time that may be NULL.
+// NullTime implements the Scanner interface so
+// it can be used as a scan destination:
+//
+// var nt NullTime
+// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt)
+// ...
+// if nt.Valid {
+// // use nt.Time
+// } else {
+// // NULL value
+// }
+//
+// This NullTime implementation is not driver-specific
+type NullTime struct {
+ Time time.Time
+ Valid bool // Valid is true if Time is not NULL
+}
+
+// for internal use.
+// the mysql package uses sql.NullTime if it is available.
+// if not, the package uses mysql.NullTime.
+type nullTime = NullTime // sql.NullTime is not available
diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go
new file mode 100644
index 0000000..6664e5a
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/packets.go
@@ -0,0 +1,1349 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "bytes"
+ "crypto/tls"
+ "database/sql/driver"
+ "encoding/binary"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "time"
+)
+
+// Packets documentation:
+// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
+
+// Read packet to buffer 'data'
+func (mc *mysqlConn) readPacket() ([]byte, error) {
+ var prevData []byte
+ for {
+ // read packet header
+ data, err := mc.buf.readNext(4)
+ if err != nil {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return nil, cerr
+ }
+ errLog.Print(err)
+ mc.Close()
+ return nil, ErrInvalidConn
+ }
+
+ // packet length [24 bit]
+ pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16)
+
+ // check packet sync [8 bit]
+ if data[3] != mc.sequence {
+ if data[3] > mc.sequence {
+ return nil, ErrPktSyncMul
+ }
+ return nil, ErrPktSync
+ }
+ mc.sequence++
+
+ // packets with length 0 terminate a previous packet which is a
+ // multiple of (2^24)-1 bytes long
+ if pktLen == 0 {
+ // there was no previous packet
+ if prevData == nil {
+ errLog.Print(ErrMalformPkt)
+ mc.Close()
+ return nil, ErrInvalidConn
+ }
+
+ return prevData, nil
+ }
+
+ // read packet body [pktLen bytes]
+ data, err = mc.buf.readNext(pktLen)
+ if err != nil {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return nil, cerr
+ }
+ errLog.Print(err)
+ mc.Close()
+ return nil, ErrInvalidConn
+ }
+
+ // return data if this was the last packet
+ if pktLen < maxPacketSize {
+ // zero allocations for non-split packets
+ if prevData == nil {
+ return data, nil
+ }
+
+ return append(prevData, data...), nil
+ }
+
+ prevData = append(prevData, data...)
+ }
+}
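+
+// Worked example (illustrative): a header of 2d 00 00 00 decodes to the
+// little-endian 24-bit length pktLen = 45 with sequence byte 0, so the
+// following readNext call fetches a 45-byte body.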
+
+// Write packet buffer 'data'
+func (mc *mysqlConn) writePacket(data []byte) error {
+ pktLen := len(data) - 4
+
+ if pktLen > mc.maxAllowedPacket {
+ return ErrPktTooLarge
+ }
+
+ // Perform a stale connection check. We only perform this check for
+ // the first query on a connection that has been checked out of the
+ // connection pool: a fresh connection from the pool is more likely
+ // to be stale, and it has not performed any previous writes that
+ // could cause data corruption, so it's safe to return ErrBadConn
+ // if the check fails.
+ if mc.reset {
+ mc.reset = false
+ conn := mc.netConn
+ if mc.rawConn != nil {
+ conn = mc.rawConn
+ }
+ var err error
+ // If this connection has a ReadTimeout which we've been setting on
+ // reads, reset it to its default value before we attempt a non-blocking
+ // read, otherwise the scheduler will just time us out before we can read
+ if mc.cfg.ReadTimeout != 0 {
+ err = conn.SetReadDeadline(time.Time{})
+ }
+ if err == nil && mc.cfg.CheckConnLiveness {
+ err = connCheck(conn)
+ }
+ if err != nil {
+ errLog.Print("closing bad idle connection: ", err)
+ mc.Close()
+ return driver.ErrBadConn
+ }
+ }
+
+ for {
+ var size int
+ if pktLen >= maxPacketSize {
+ data[0] = 0xff
+ data[1] = 0xff
+ data[2] = 0xff
+ size = maxPacketSize
+ } else {
+ data[0] = byte(pktLen)
+ data[1] = byte(pktLen >> 8)
+ data[2] = byte(pktLen >> 16)
+ size = pktLen
+ }
+ data[3] = mc.sequence
+
+ // Write packet
+ if mc.writeTimeout > 0 {
+ if err := mc.netConn.SetWriteDeadline(time.Now().Add(mc.writeTimeout)); err != nil {
+ return err
+ }
+ }
+
+ n, err := mc.netConn.Write(data[:4+size])
+ if err == nil && n == 4+size {
+ mc.sequence++
+ if size != maxPacketSize {
+ return nil
+ }
+ pktLen -= size
+ data = data[size:]
+ continue
+ }
+
+ // Handle error
+ if err == nil { // n != len(data)
+ mc.cleanup()
+ errLog.Print(ErrMalformPkt)
+ } else {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return cerr
+ }
+ if n == 0 && pktLen == len(data)-4 {
+ // only for the first loop iteration when nothing was written yet
+ return errBadConnNoWrite
+ }
+ mc.cleanup()
+ errLog.Print(err)
+ }
+ return ErrInvalidConn
+ }
+}
+
+/******************************************************************************
+* Initialization Process *
+******************************************************************************/
+
+// Handshake Initialization Packet
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake
+func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err error) {
+ data, err = mc.readPacket()
+ if err != nil {
+ // for init we can rewrite this to ErrBadConn for sql.Driver to retry, since
+ // in connection initialization we don't risk retrying non-idempotent actions.
+ if err == ErrInvalidConn {
+ return nil, "", driver.ErrBadConn
+ }
+ return
+ }
+
+ if data[0] == iERR {
+ return nil, "", mc.handleErrorPacket(data)
+ }
+
+ // protocol version [1 byte]
+ if data[0] < minProtocolVersion {
+ return nil, "", fmt.Errorf(
+ "unsupported protocol version %d. Version %d or higher is required",
+ data[0],
+ minProtocolVersion,
+ )
+ }
+
+ // server version [null terminated string]
+ // connection id [4 bytes]
+ pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4
+
+ // first part of the password cipher [8 bytes]
+ authData := data[pos : pos+8]
+
+ // (filler) always 0x00 [1 byte]
+ pos += 8 + 1
+
+ // capability flags (lower 2 bytes) [2 bytes]
+ mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2]))
+ if mc.flags&clientProtocol41 == 0 {
+ return nil, "", ErrOldProtocol
+ }
+ if mc.flags&clientSSL == 0 && mc.cfg.tls != nil {
+ if mc.cfg.TLSConfig == "preferred" {
+ mc.cfg.tls = nil
+ } else {
+ return nil, "", ErrNoTLS
+ }
+ }
+ pos += 2
+
+ if len(data) > pos {
+ // character set [1 byte]
+ // status flags [2 bytes]
+ // capability flags (upper 2 bytes) [2 bytes]
+ // length of auth-plugin-data [1 byte]
+ // reserved (all [00]) [10 bytes]
+ pos += 1 + 2 + 2 + 1 + 10
+
+ // second part of the password cipher [minimum 13 bytes],
+ // where len=MAX(13, length of auth-plugin-data - 8)
+ //
+ // The web documentation is ambiguous about the length. However,
+ // according to mysql-5.7/sql/auth/sql_authentication.cc line 538,
+ // the 13th byte is "\0 byte, terminating the second part of
+ // a scramble". So the second part of the password cipher is
+ // a NULL terminated string that's at least 13 bytes with the
+ // last byte being NULL.
+ //
+ // The official Python library uses the fixed length 12
+ // which seems to work but technically could have a hidden bug.
+ authData = append(authData, data[pos:pos+12]...)
+ pos += 13
+
+ // EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2)
+ // \NUL otherwise
+ if end := bytes.IndexByte(data[pos:], 0x00); end != -1 {
+ plugin = string(data[pos : pos+end])
+ } else {
+ plugin = string(data[pos:])
+ }
+
+ // make a memory safe copy of the cipher slice
+ var b [20]byte
+ copy(b[:], authData)
+ return b[:], plugin, nil
+ }
+
+ // make a memory safe copy of the cipher slice
+ var b [8]byte
+ copy(b[:], authData)
+ return b[:], plugin, nil
+}
+
+// Client Authentication Packet
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse
+func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string) error {
+ // Adjust client flags based on server support
+ clientFlags := clientProtocol41 |
+ clientSecureConn |
+ clientLongPassword |
+ clientTransactions |
+ clientLocalFiles |
+ clientPluginAuth |
+ clientMultiResults |
+ mc.flags&clientLongFlag
+
+ if mc.cfg.ClientFoundRows {
+ clientFlags |= clientFoundRows
+ }
+
+ // To enable TLS / SSL
+ if mc.cfg.tls != nil {
+ clientFlags |= clientSSL
+ }
+
+ if mc.cfg.MultiStatements {
+ clientFlags |= clientMultiStatements
+ }
+
+ // encode length of the auth plugin data
+ var authRespLEIBuf [9]byte
+ authRespLen := len(authResp)
+ authRespLEI := appendLengthEncodedInteger(authRespLEIBuf[:0], uint64(authRespLen))
+ if len(authRespLEI) > 1 {
+ // if the length can not be written in 1 byte, it must be written as a
+ // length encoded integer
+ clientFlags |= clientPluginAuthLenEncClientData
+ }
+
+ pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + len(authRespLEI) + len(authResp) + 21 + 1
+
+ // To specify a db name
+ if n := len(mc.cfg.DBName); n > 0 {
+ clientFlags |= clientConnectWithDB
+ pktLen += n + 1
+ }
+
+ // Calculate packet length and get buffer with that size
+ data, err := mc.buf.takeSmallBuffer(pktLen + 4)
+ if err != nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(err)
+ return errBadConnNoWrite
+ }
+
+ // ClientFlags [32 bit]
+ data[4] = byte(clientFlags)
+ data[5] = byte(clientFlags >> 8)
+ data[6] = byte(clientFlags >> 16)
+ data[7] = byte(clientFlags >> 24)
+
+ // MaxPacketSize [32 bit] (none)
+ data[8] = 0x00
+ data[9] = 0x00
+ data[10] = 0x00
+ data[11] = 0x00
+
+ // Charset [1 byte]
+ var found bool
+ data[12], found = collations[mc.cfg.Collation]
+ if !found {
+ // Note the possibility of false negatives:
+ // this can be triggered even though the collation is valid if the
+ // collations map does not contain an entry the server supports.
+ return errors.New("unknown collation")
+ }
+
+ // Filler [23 bytes] (all 0x00)
+ pos := 13
+ for ; pos < 13+23; pos++ {
+ data[pos] = 0
+ }
+
+ // SSL Connection Request Packet
+ // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest
+ if mc.cfg.tls != nil {
+ // Send TLS / SSL request packet
+ if err := mc.writePacket(data[:(4+4+1+23)+4]); err != nil {
+ return err
+ }
+
+ // Switch to TLS
+ tlsConn := tls.Client(mc.netConn, mc.cfg.tls)
+ if err := tlsConn.Handshake(); err != nil {
+ return err
+ }
+ mc.rawConn = mc.netConn
+ mc.netConn = tlsConn
+ mc.buf.nc = tlsConn
+ }
+
+ // User [null terminated string]
+ if len(mc.cfg.User) > 0 {
+ pos += copy(data[pos:], mc.cfg.User)
+ }
+ data[pos] = 0x00
+ pos++
+
+ // Auth Data [length encoded integer]
+ pos += copy(data[pos:], authRespLEI)
+ pos += copy(data[pos:], authResp)
+
+ // Databasename [null terminated string]
+ if len(mc.cfg.DBName) > 0 {
+ pos += copy(data[pos:], mc.cfg.DBName)
+ data[pos] = 0x00
+ pos++
+ }
+
+ pos += copy(data[pos:], plugin)
+ data[pos] = 0x00
+ pos++
+
+ // Send Auth packet
+ return mc.writePacket(data[:pos])
+}
+
+// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
+func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error {
+ pktLen := 4 + len(authData)
+ data, err := mc.buf.takeSmallBuffer(pktLen)
+ if err != nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(err)
+ return errBadConnNoWrite
+ }
+
+ // Add the auth data [EOF]
+ copy(data[4:], authData)
+ return mc.writePacket(data)
+}
+
+/******************************************************************************
+* Command Packets *
+******************************************************************************/
+
+func (mc *mysqlConn) writeCommandPacket(command byte) error {
+ // Reset Packet Sequence
+ mc.sequence = 0
+
+ data, err := mc.buf.takeSmallBuffer(4 + 1)
+ if err != nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(err)
+ return errBadConnNoWrite
+ }
+
+ // Add command byte
+ data[4] = command
+
+ // Send CMD packet
+ return mc.writePacket(data)
+}
+
+func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error {
+ // Reset Packet Sequence
+ mc.sequence = 0
+
+ pktLen := 1 + len(arg)
+ data, err := mc.buf.takeBuffer(pktLen + 4)
+ if err != nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(err)
+ return errBadConnNoWrite
+ }
+
+ // Add command byte
+ data[4] = command
+
+ // Add arg
+ copy(data[5:], arg)
+
+ // Send CMD packet
+ return mc.writePacket(data)
+}
+
+func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
+ // Reset Packet Sequence
+ mc.sequence = 0
+
+ data, err := mc.buf.takeSmallBuffer(4 + 1 + 4)
+ if err != nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(err)
+ return errBadConnNoWrite
+ }
+
+ // Add command byte
+ data[4] = command
+
+ // Add arg [32 bit]
+ data[5] = byte(arg)
+ data[6] = byte(arg >> 8)
+ data[7] = byte(arg >> 16)
+ data[8] = byte(arg >> 24)
+
+ // Send CMD packet
+ return mc.writePacket(data)
+}
+
+/******************************************************************************
+* Result Packets *
+******************************************************************************/
+
+func (mc *mysqlConn) readAuthResult() ([]byte, string, error) {
+ data, err := mc.readPacket()
+ if err != nil {
+ return nil, "", err
+ }
+
+ // packet indicator
+ switch data[0] {
+
+ case iOK:
+ return nil, "", mc.handleOkPacket(data)
+
+ case iAuthMoreData:
+ return data[1:], "", err
+
+ case iEOF:
+ if len(data) == 1 {
+ // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest
+ return nil, "mysql_old_password", nil
+ }
+ pluginEndIndex := bytes.IndexByte(data, 0x00)
+ if pluginEndIndex < 0 {
+ return nil, "", ErrMalformPkt
+ }
+ plugin := string(data[1:pluginEndIndex])
+ authData := data[pluginEndIndex+1:]
+ return authData, plugin, nil
+
+ default: // Error otherwise
+ return nil, "", mc.handleErrorPacket(data)
+ }
+}
+
+// Returns an error if the Packet is not a 'Result OK'-Packet
+func (mc *mysqlConn) readResultOK() error {
+ data, err := mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ if data[0] == iOK {
+ return mc.handleOkPacket(data)
+ }
+ return mc.handleErrorPacket(data)
+}
+
+// Result Set Header Packet
+// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset
+func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) {
+ data, err := mc.readPacket()
+ if err == nil {
+ switch data[0] {
+
+ case iOK:
+ return 0, mc.handleOkPacket(data)
+
+ case iERR:
+ return 0, mc.handleErrorPacket(data)
+
+ case iLocalInFile:
+ return 0, mc.handleInFileRequest(string(data[1:]))
+ }
+
+ // column count
+ num, _, n := readLengthEncodedInteger(data)
+ if n-len(data) == 0 {
+ return int(num), nil
+ }
+
+ return 0, ErrMalformPkt
+ }
+ return 0, err
+}
+
+// Error Packet
+// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-ERR_Packet
+func (mc *mysqlConn) handleErrorPacket(data []byte) error {
+ if data[0] != iERR {
+ return ErrMalformPkt
+ }
+
+ // 0xff [1 byte]
+
+ // Error Number [16 bit uint]
+ errno := binary.LittleEndian.Uint16(data[1:3])
+
+ // 1792: ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION
+ // 1290: ER_OPTION_PREVENTS_STATEMENT (returned by Aurora during failover)
+ if (errno == 1792 || errno == 1290) && mc.cfg.RejectReadOnly {
+ // Oops; we are connected to a read-only connection, and won't be able
+ // to issue any write statements. Since RejectReadOnly is configured,
+ // we throw away this connection hoping this one would have write
+ // permission. This is specifically for a possible race condition
+ // during failover (e.g. on AWS Aurora). See README.md for more.
+ //
+ // We explicitly close the connection before returning
+ // driver.ErrBadConn to ensure that `database/sql` purges this
+ // connection and initiates a new one for next statement next time.
+ mc.Close()
+ return driver.ErrBadConn
+ }
+
+ pos := 3
+
+ // SQL State [optional: # + 5bytes string]
+ if data[3] == 0x23 {
+ //sqlstate := string(data[4 : 4+5])
+ pos = 9
+ }
+
+ // Error Message [string]
+ return &MySQLError{
+ Number: errno,
+ Message: string(data[pos:]),
+ }
+}
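+
+// Worked example (illustrative): an ERR packet starting
+// ff 28 04 23 34 32 30 30 30 ... decodes to errno 0x0428 = 1064
+// (ER_PARSE_ERROR); the 0x23 ('#') marker indicates a 5-byte SQL state
+// ("42000") before the human-readable message.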
+
+func readStatus(b []byte) statusFlag {
+ return statusFlag(b[0]) | statusFlag(b[1])<<8
+}
+
+// Ok Packet
+// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet
+func (mc *mysqlConn) handleOkPacket(data []byte) error {
+ var n, m int
+
+ // 0x00 [1 byte]
+
+ // Affected rows [Length Coded Binary]
+ mc.affectedRows, _, n = readLengthEncodedInteger(data[1:])
+
+ // Insert id [Length Coded Binary]
+ mc.insertId, _, m = readLengthEncodedInteger(data[1+n:])
+
+ // server_status [2 bytes]
+ mc.status = readStatus(data[1+n+m : 1+n+m+2])
+ if mc.status&statusMoreResultsExists != 0 {
+ return nil
+ }
+
+ // warning count [2 bytes]
+
+ return nil
+}
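+
+// Worked example (illustrative): an OK packet of 00 01 02 02 00 00 00
+// decodes to affectedRows = 1, insertId = 2, status = 0x0002 (autocommit)
+// and a warning count of 0.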
+
+// Read Packets as Field Packets until EOF-Packet or an Error appears
+// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnDefinition41
+func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) {
+ columns := make([]mysqlField, count)
+
+ for i := 0; ; i++ {
+ data, err := mc.readPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ // EOF Packet
+ if data[0] == iEOF && (len(data) == 5 || len(data) == 1) {
+ if i == count {
+ return columns, nil
+ }
+ return nil, fmt.Errorf("column count mismatch n:%d len:%d", count, len(columns))
+ }
+
+ // Catalog
+ pos, err := skipLengthEncodedString(data)
+ if err != nil {
+ return nil, err
+ }
+
+ // Database [len coded string]
+ n, err := skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+
+ // Table [len coded string]
+ if mc.cfg.ColumnsWithAlias {
+ tableName, _, n, err := readLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+ columns[i].tableName = string(tableName)
+ } else {
+ n, err = skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+ }
+
+ // Original table [len coded string]
+ n, err = skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+
+ // Name [len coded string]
+ name, _, n, err := readLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ columns[i].name = string(name)
+ pos += n
+
+ // Original name [len coded string]
+ n, err = skipLengthEncodedString(data[pos:])
+ if err != nil {
+ return nil, err
+ }
+ pos += n
+
+ // Filler [uint8]
+ pos++
+
+ // Charset [charset, collation uint8]
+ columns[i].charSet = data[pos]
+ pos += 2
+
+ // Length [uint32]
+ columns[i].length = binary.LittleEndian.Uint32(data[pos : pos+4])
+ pos += 4
+
+ // Field type [uint8]
+ columns[i].fieldType = fieldType(data[pos])
+ pos++
+
+ // Flags [uint16]
+ columns[i].flags = fieldFlag(binary.LittleEndian.Uint16(data[pos : pos+2]))
+ pos += 2
+
+ // Decimals [uint8]
+ columns[i].decimals = data[pos]
+ //pos++
+
+ // Default value [len coded binary]
+ //if pos < len(data) {
+ // defaultVal, _, err = bytesToLengthCodedBinary(data[pos:])
+ //}
+ }
+}
+
+// Read Packets as Field Packets until EOF-Packet or an Error appears
+// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::ResultsetRow
+func (rows *textRows) readRow(dest []driver.Value) error {
+ mc := rows.mc
+
+ if rows.rs.done {
+ return io.EOF
+ }
+
+ data, err := mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ // EOF Packet
+ if data[0] == iEOF && len(data) == 5 {
+ // server_status [2 bytes]
+ rows.mc.status = readStatus(data[3:])
+ rows.rs.done = true
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
+ }
+ return io.EOF
+ }
+ if data[0] == iERR {
+ rows.mc = nil
+ return mc.handleErrorPacket(data)
+ }
+
+ // RowSet Packet
+ var n int
+ var isNull bool
+ pos := 0
+
+ for i := range dest {
+ // Read bytes and convert to string
+ dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
+ pos += n
+ if err == nil {
+ if !isNull {
+ if !mc.parseTime {
+ continue
+ } else {
+ switch rows.rs.columns[i].fieldType {
+ case fieldTypeTimestamp, fieldTypeDateTime,
+ fieldTypeDate, fieldTypeNewDate:
+ dest[i], err = parseDateTime(
+ dest[i].([]byte),
+ mc.cfg.Loc,
+ )
+ if err == nil {
+ continue
+ }
+ default:
+ continue
+ }
+ }
+
+ } else {
+ dest[i] = nil
+ continue
+ }
+ }
+ return err // err != nil
+ }
+
+ return nil
+}
+
+// Reads Packets until an EOF-Packet or an Error appears
+func (mc *mysqlConn) readUntilEOF() error {
+ for {
+ data, err := mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ switch data[0] {
+ case iERR:
+ return mc.handleErrorPacket(data)
+ case iEOF:
+ if len(data) == 5 {
+ mc.status = readStatus(data[3:])
+ }
+ return nil
+ }
+ }
+}
+
+/******************************************************************************
+* Prepared Statements *
+******************************************************************************/
+
+// Prepare Result Packets
+// http://dev.mysql.com/doc/internals/en/com-stmt-prepare-response.html
+func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) {
+ data, err := stmt.mc.readPacket()
+ if err == nil {
+ // packet indicator [1 byte]
+ if data[0] != iOK {
+ return 0, stmt.mc.handleErrorPacket(data)
+ }
+
+ // statement id [4 bytes]
+ stmt.id = binary.LittleEndian.Uint32(data[1:5])
+
+ // Column count [16 bit uint]
+ columnCount := binary.LittleEndian.Uint16(data[5:7])
+
+ // Param count [16 bit uint]
+ stmt.paramCount = int(binary.LittleEndian.Uint16(data[7:9]))
+
+ // Reserved [8 bit]
+
+ // Warning count [16 bit uint]
+
+ return columnCount, nil
+ }
+ return 0, err
+}
+
+// http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html
+func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error {
+ maxLen := stmt.mc.maxAllowedPacket - 1
+ pktLen := maxLen
+
+ // Between the header (bytes 0-3) and the data, the packet contains:
+ // 1 byte command
+ // 4 bytes stmtID
+ // 2 bytes paramID
+ const dataOffset = 1 + 4 + 2
+
+ // Cannot use the write buffer since
+ // a) the buffer is too small
+ // b) it is in use
+ data := make([]byte, 4+1+4+2+len(arg))
+
+ copy(data[4+dataOffset:], arg)
+
+ for argLen := len(arg); argLen > 0; argLen -= pktLen - dataOffset {
+ if dataOffset+argLen < maxLen {
+ pktLen = dataOffset + argLen
+ }
+
+ stmt.mc.sequence = 0
+ // Add command byte [1 byte]
+ data[4] = comStmtSendLongData
+
+ // Add stmtID [32 bit]
+ data[5] = byte(stmt.id)
+ data[6] = byte(stmt.id >> 8)
+ data[7] = byte(stmt.id >> 16)
+ data[8] = byte(stmt.id >> 24)
+
+ // Add paramID [16 bit]
+ data[9] = byte(paramID)
+ data[10] = byte(paramID >> 8)
+
+ // Send CMD packet
+ err := stmt.mc.writePacket(data[:4+pktLen])
+ if err == nil {
+ data = data[pktLen-dataOffset:]
+ continue
+ }
+ return err
+
+ }
+
+ // Reset Packet Sequence
+ stmt.mc.sequence = 0
+ return nil
+}
+
+// Execute Prepared Statement
+// http://dev.mysql.com/doc/internals/en/com-stmt-execute.html
+func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
+ if len(args) != stmt.paramCount {
+ return fmt.Errorf(
+ "argument count mismatch (got: %d; has: %d)",
+ len(args),
+ stmt.paramCount,
+ )
+ }
+
+ const minPktLen = 4 + 1 + 4 + 1 + 4
+ mc := stmt.mc
+
+ // Determine threshold dynamically to avoid packet size shortage.
+ longDataSize := mc.maxAllowedPacket / (stmt.paramCount + 1)
+ if longDataSize < 64 {
+ longDataSize = 64
+ }
+
+ // Reset packet-sequence
+ mc.sequence = 0
+
+ var data []byte
+ var err error
+
+ if len(args) == 0 {
+ data, err = mc.buf.takeBuffer(minPktLen)
+ } else {
+ data, err = mc.buf.takeCompleteBuffer()
+ // In this case the len(data) == cap(data) which is used to optimise the flow below.
+ }
+ if err != nil {
+ // cannot take the buffer. Something must be wrong with the connection
+ errLog.Print(err)
+ return errBadConnNoWrite
+ }
+
+ // command [1 byte]
+ data[4] = comStmtExecute
+
+ // statement_id [4 bytes]
+ data[5] = byte(stmt.id)
+ data[6] = byte(stmt.id >> 8)
+ data[7] = byte(stmt.id >> 16)
+ data[8] = byte(stmt.id >> 24)
+
+ // flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte]
+ data[9] = 0x00
+
+ // iteration_count (uint32(1)) [4 bytes]
+ data[10] = 0x01
+ data[11] = 0x00
+ data[12] = 0x00
+ data[13] = 0x00
+
+ if len(args) > 0 {
+ pos := minPktLen
+
+ var nullMask []byte
+ if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= cap(data) {
+ // buffer has to be extended but we don't know by how much so
+ // we depend on append after all data with known sizes fit.
+ // We stop at that because we deal with a lot of columns here
+ // which makes the required allocation size hard to guess.
+ tmp := make([]byte, pos+maskLen+typesLen)
+ copy(tmp[:pos], data[:pos])
+ data = tmp
+ nullMask = data[pos : pos+maskLen]
+ // No need to clean nullMask as make ensures that.
+ pos += maskLen
+ } else {
+ nullMask = data[pos : pos+maskLen]
+ for i := range nullMask {
+ nullMask[i] = 0
+ }
+ pos += maskLen
+ }
+
+ // newParameterBoundFlag 1 [1 byte]
+ data[pos] = 0x01
+ pos++
+
+ // type of each parameter [len(args)*2 bytes]
+ paramTypes := data[pos:]
+ pos += len(args) * 2
+
+ // value of each parameter [n bytes]
+ paramValues := data[pos:pos]
+ valuesCap := cap(paramValues)
+
+ for i, arg := range args {
+ // build NULL-bitmap
+ if arg == nil {
+ nullMask[i/8] |= 1 << (uint(i) & 7)
+ paramTypes[i+i] = byte(fieldTypeNULL)
+ paramTypes[i+i+1] = 0x00
+ continue
+ }
+
+ if v, ok := arg.(json.RawMessage); ok {
+ arg = []byte(v)
+ }
+ // cache types and values
+ switch v := arg.(type) {
+ case int64:
+ paramTypes[i+i] = byte(fieldTypeLongLong)
+ paramTypes[i+i+1] = 0x00
+
+ if cap(paramValues)-len(paramValues)-8 >= 0 {
+ paramValues = paramValues[:len(paramValues)+8]
+ binary.LittleEndian.PutUint64(
+ paramValues[len(paramValues)-8:],
+ uint64(v),
+ )
+ } else {
+ paramValues = append(paramValues,
+ uint64ToBytes(uint64(v))...,
+ )
+ }
+
+ case uint64:
+ paramTypes[i+i] = byte(fieldTypeLongLong)
+ paramTypes[i+i+1] = 0x80 // type is unsigned
+
+ if cap(paramValues)-len(paramValues)-8 >= 0 {
+ paramValues = paramValues[:len(paramValues)+8]
+ binary.LittleEndian.PutUint64(
+ paramValues[len(paramValues)-8:],
+ uint64(v),
+ )
+ } else {
+ paramValues = append(paramValues,
+ uint64ToBytes(uint64(v))...,
+ )
+ }
+
+ case float64:
+ paramTypes[i+i] = byte(fieldTypeDouble)
+ paramTypes[i+i+1] = 0x00
+
+ if cap(paramValues)-len(paramValues)-8 >= 0 {
+ paramValues = paramValues[:len(paramValues)+8]
+ binary.LittleEndian.PutUint64(
+ paramValues[len(paramValues)-8:],
+ math.Float64bits(v),
+ )
+ } else {
+ paramValues = append(paramValues,
+ uint64ToBytes(math.Float64bits(v))...,
+ )
+ }
+
+ case bool:
+ paramTypes[i+i] = byte(fieldTypeTiny)
+ paramTypes[i+i+1] = 0x00
+
+ if v {
+ paramValues = append(paramValues, 0x01)
+ } else {
+ paramValues = append(paramValues, 0x00)
+ }
+
+ case []byte:
+ // Common case (non-nil value) first
+ if v != nil {
+ paramTypes[i+i] = byte(fieldTypeString)
+ paramTypes[i+i+1] = 0x00
+
+ if len(v) < longDataSize {
+ paramValues = appendLengthEncodedInteger(paramValues,
+ uint64(len(v)),
+ )
+ paramValues = append(paramValues, v...)
+ } else {
+ if err := stmt.writeCommandLongData(i, v); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+
+ // Handle []byte(nil) as a NULL value
+ nullMask[i/8] |= 1 << (uint(i) & 7)
+ paramTypes[i+i] = byte(fieldTypeNULL)
+ paramTypes[i+i+1] = 0x00
+
+ case string:
+ paramTypes[i+i] = byte(fieldTypeString)
+ paramTypes[i+i+1] = 0x00
+
+ if len(v) < longDataSize {
+ paramValues = appendLengthEncodedInteger(paramValues,
+ uint64(len(v)),
+ )
+ paramValues = append(paramValues, v...)
+ } else {
+ if err := stmt.writeCommandLongData(i, []byte(v)); err != nil {
+ return err
+ }
+ }
+
+ case time.Time:
+ paramTypes[i+i] = byte(fieldTypeString)
+ paramTypes[i+i+1] = 0x00
+
+ var a [64]byte
+ var b = a[:0]
+
+ if v.IsZero() {
+ b = append(b, "0000-00-00"...)
+ } else {
+ b, err = appendDateTime(b, v.In(mc.cfg.Loc))
+ if err != nil {
+ return err
+ }
+ }
+
+ paramValues = appendLengthEncodedInteger(paramValues,
+ uint64(len(b)),
+ )
+ paramValues = append(paramValues, b...)
+
+ default:
+ return fmt.Errorf("cannot convert type: %T", arg)
+ }
+ }
+
+ // Check if param values exceeded the available buffer
+ // In that case we must build the data packet with the new values buffer
+ if valuesCap != cap(paramValues) {
+ data = append(data[:pos], paramValues...)
+ if err = mc.buf.store(data); err != nil {
+ errLog.Print(err)
+ return errBadConnNoWrite
+ }
+ }
+
+ pos += len(paramValues)
+ data = data[:pos]
+ }
+
+ return mc.writePacket(data)
+}
+
+func (mc *mysqlConn) discardResults() error {
+ for mc.status&statusMoreResultsExists != 0 {
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err != nil {
+ return err
+ }
+ if resLen > 0 {
+ // columns
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
+ // rows
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html
+func (rows *binaryRows) readRow(dest []driver.Value) error {
+ data, err := rows.mc.readPacket()
+ if err != nil {
+ return err
+ }
+
+ // packet indicator [1 byte]
+ if data[0] != iOK {
+ // EOF Packet
+ if data[0] == iEOF && len(data) == 5 {
+ rows.mc.status = readStatus(data[3:])
+ rows.rs.done = true
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
+ }
+ return io.EOF
+ }
+ mc := rows.mc
+ rows.mc = nil
+
+ // Error otherwise
+ return mc.handleErrorPacket(data)
+ }
+
+ // NULL-bitmap, [(column-count + 7 + 2) / 8 bytes]
+ pos := 1 + (len(dest)+7+2)>>3
+ nullMask := data[1:pos]
+
+ for i := range dest {
+ // Field is NULL
+ // (byte >> bit-pos) % 2 == 1
+ if ((nullMask[(i+2)>>3] >> uint((i+2)&7)) & 1) == 1 {
+ dest[i] = nil
+ continue
+ }
+
+ // Convert to byte-coded string
+ switch rows.rs.columns[i].fieldType {
+ case fieldTypeNULL:
+ dest[i] = nil
+ continue
+
+ // Numeric Types
+ case fieldTypeTiny:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ dest[i] = int64(data[pos])
+ } else {
+ dest[i] = int64(int8(data[pos]))
+ }
+ pos++
+ continue
+
+ case fieldTypeShort, fieldTypeYear:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2]))
+ } else {
+ dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2])))
+ }
+ pos += 2
+ continue
+
+ case fieldTypeInt24, fieldTypeLong:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4]))
+ } else {
+ dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4])))
+ }
+ pos += 4
+ continue
+
+ case fieldTypeLongLong:
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
+ val := binary.LittleEndian.Uint64(data[pos : pos+8])
+ if val > math.MaxInt64 {
+ dest[i] = uint64ToString(val)
+ } else {
+ dest[i] = int64(val)
+ }
+ } else {
+ dest[i] = int64(binary.LittleEndian.Uint64(data[pos : pos+8]))
+ }
+ pos += 8
+ continue
+
+ case fieldTypeFloat:
+ dest[i] = math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4]))
+ pos += 4
+ continue
+
+ case fieldTypeDouble:
+ dest[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[pos : pos+8]))
+ pos += 8
+ continue
+
+ // Length coded Binary Strings
+ case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
+ fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
+ fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
+ fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON:
+ var isNull bool
+ var n int
+ dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
+ pos += n
+ if err == nil {
+ if !isNull {
+ continue
+ } else {
+ dest[i] = nil
+ continue
+ }
+ }
+ return err
+
+ case
+ fieldTypeDate, fieldTypeNewDate, // Date YYYY-MM-DD
+ fieldTypeTime, // Time [-][H]HH:MM:SS[.fractal]
+ fieldTypeTimestamp, fieldTypeDateTime: // Timestamp YYYY-MM-DD HH:MM:SS[.fractal]
+
+ num, isNull, n := readLengthEncodedInteger(data[pos:])
+ pos += n
+
+ switch {
+ case isNull:
+ dest[i] = nil
+ continue
+ case rows.rs.columns[i].fieldType == fieldTypeTime:
+ // database/sql does not support an equivalent to TIME, return a string
+ var dstlen uint8
+ switch decimals := rows.rs.columns[i].decimals; decimals {
+ case 0x00, 0x1f:
+ dstlen = 8
+ case 1, 2, 3, 4, 5, 6:
+ dstlen = 8 + 1 + decimals
+ default:
+ return fmt.Errorf(
+ "protocol error, illegal decimals value %d",
+ rows.rs.columns[i].decimals,
+ )
+ }
+ dest[i], err = formatBinaryTime(data[pos:pos+int(num)], dstlen)
+ case rows.mc.parseTime:
+ dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.Loc)
+ default:
+ var dstlen uint8
+ if rows.rs.columns[i].fieldType == fieldTypeDate {
+ dstlen = 10
+ } else {
+ switch decimals := rows.rs.columns[i].decimals; decimals {
+ case 0x00, 0x1f:
+ dstlen = 19
+ case 1, 2, 3, 4, 5, 6:
+ dstlen = 19 + 1 + decimals
+ default:
+ return fmt.Errorf(
+ "protocol error, illegal decimals value %d",
+ rows.rs.columns[i].decimals,
+ )
+ }
+ }
+ dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen)
+ }
+
+ if err == nil {
+ pos += int(num)
+ continue
+ } else {
+ return err
+ }
+
+ // Please report if this happens!
+ default:
+ return fmt.Errorf("unknown field type %d", rows.rs.columns[i].fieldType)
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/result.go b/vendor/github.com/go-sql-driver/mysql/result.go
new file mode 100644
index 0000000..c6438d0
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/result.go
@@ -0,0 +1,22 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+type mysqlResult struct {
+ affectedRows int64
+ insertId int64
+}
+
+func (res *mysqlResult) LastInsertId() (int64, error) {
+ return res.insertId, nil
+}
+
+func (res *mysqlResult) RowsAffected() (int64, error) {
+ return res.affectedRows, nil
+}
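+
+// Illustrative sketch (not part of the driver): these values surface through
+// database/sql's Result interface (table and query are hypothetical):
+//
+// res, _ := db.Exec("UPDATE t SET x = 1 WHERE id = ?", 7)
+// n, _ := res.RowsAffected()  // -> mysqlResult.affectedRows
+// id, _ := res.LastInsertId() // -> mysqlResult.insertId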
diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go
new file mode 100644
index 0000000..888bdb5
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/rows.go
@@ -0,0 +1,223 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql/driver"
+ "io"
+ "math"
+ "reflect"
+)
+
+type resultSet struct {
+ columns []mysqlField
+ columnNames []string
+ done bool
+}
+
+type mysqlRows struct {
+ mc *mysqlConn
+ rs resultSet
+ finish func()
+}
+
+type binaryRows struct {
+ mysqlRows
+}
+
+type textRows struct {
+ mysqlRows
+}
+
+func (rows *mysqlRows) Columns() []string {
+ if rows.rs.columnNames != nil {
+ return rows.rs.columnNames
+ }
+
+ columns := make([]string, len(rows.rs.columns))
+ if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias {
+ for i := range columns {
+ if tableName := rows.rs.columns[i].tableName; len(tableName) > 0 {
+ columns[i] = tableName + "." + rows.rs.columns[i].name
+ } else {
+ columns[i] = rows.rs.columns[i].name
+ }
+ }
+ } else {
+ for i := range columns {
+ columns[i] = rows.rs.columns[i].name
+ }
+ }
+
+ rows.rs.columnNames = columns
+ return columns
+}
+
+func (rows *mysqlRows) ColumnTypeDatabaseTypeName(i int) string {
+ return rows.rs.columns[i].typeDatabaseName()
+}
+
+// func (rows *mysqlRows) ColumnTypeLength(i int) (length int64, ok bool) {
+// return int64(rows.rs.columns[i].length), true
+// }
+
+func (rows *mysqlRows) ColumnTypeNullable(i int) (nullable, ok bool) {
+ return rows.rs.columns[i].flags&flagNotNULL == 0, true
+}
+
+func (rows *mysqlRows) ColumnTypePrecisionScale(i int) (int64, int64, bool) {
+ column := rows.rs.columns[i]
+ decimals := int64(column.decimals)
+
+ switch column.fieldType {
+ case fieldTypeDecimal, fieldTypeNewDecimal:
+ if decimals > 0 {
+ return int64(column.length) - 2, decimals, true
+ }
+ return int64(column.length) - 1, decimals, true
+ case fieldTypeTimestamp, fieldTypeDateTime, fieldTypeTime:
+ return decimals, decimals, true
+ case fieldTypeFloat, fieldTypeDouble:
+ if decimals == 0x1f {
+ return math.MaxInt64, math.MaxInt64, true
+ }
+ return math.MaxInt64, decimals, true
+ }
+
+ return 0, 0, false
+}
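+
+// Worked example (illustrative): for a DECIMAL(10,2) column the server
+// reports length 12 and decimals 2, so the branch above yields precision
+// 12-2 = 10 and scale 2; with scale 0 only the sign byte is subtracted.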
+
+func (rows *mysqlRows) ColumnTypeScanType(i int) reflect.Type {
+ return rows.rs.columns[i].scanType()
+}
+
+func (rows *mysqlRows) Close() (err error) {
+ if f := rows.finish; f != nil {
+ f()
+ rows.finish = nil
+ }
+
+ mc := rows.mc
+ if mc == nil {
+ return nil
+ }
+ if err := mc.error(); err != nil {
+ return err
+ }
+
+ // flip the buffer for this connection if we need to drain it.
+ // note that for a successful query (i.e. one where rows.next()
+ // has been called until it returns false), `rows.mc` will be nil
+ // by the time the user calls `(*Rows).Close`, so we won't reach this
+ // see: https://github.com/golang/go/commit/651ddbdb5056ded455f47f9c494c67b389622a47
+ mc.buf.flip()
+
+ // Remove unread packets from stream
+ if !rows.rs.done {
+ err = mc.readUntilEOF()
+ }
+ if err == nil {
+ if err = mc.discardResults(); err != nil {
+ return err
+ }
+ }
+
+ rows.mc = nil
+ return err
+}
+
+func (rows *mysqlRows) HasNextResultSet() (b bool) {
+ if rows.mc == nil {
+ return false
+ }
+ return rows.mc.status&statusMoreResultsExists != 0
+}
+
+func (rows *mysqlRows) nextResultSet() (int, error) {
+ if rows.mc == nil {
+ return 0, io.EOF
+ }
+ if err := rows.mc.error(); err != nil {
+ return 0, err
+ }
+
+ // Remove unread packets from stream
+ if !rows.rs.done {
+ if err := rows.mc.readUntilEOF(); err != nil {
+ return 0, err
+ }
+ rows.rs.done = true
+ }
+
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
+ return 0, io.EOF
+ }
+ rows.rs = resultSet{}
+ return rows.mc.readResultSetHeaderPacket()
+}
+
+func (rows *mysqlRows) nextNotEmptyResultSet() (int, error) {
+ for {
+ resLen, err := rows.nextResultSet()
+ if err != nil {
+ return 0, err
+ }
+
+ if resLen > 0 {
+ return resLen, nil
+ }
+
+ rows.rs.done = true
+ }
+}
+
+func (rows *binaryRows) NextResultSet() error {
+ resLen, err := rows.nextNotEmptyResultSet()
+ if err != nil {
+ return err
+ }
+
+ rows.rs.columns, err = rows.mc.readColumns(resLen)
+ return err
+}
+
+func (rows *binaryRows) Next(dest []driver.Value) error {
+ if mc := rows.mc; mc != nil {
+ if err := mc.error(); err != nil {
+ return err
+ }
+
+ // Fetch next row from stream
+ return rows.readRow(dest)
+ }
+ return io.EOF
+}
+
+func (rows *textRows) NextResultSet() (err error) {
+ resLen, err := rows.nextNotEmptyResultSet()
+ if err != nil {
+ return err
+ }
+
+ rows.rs.columns, err = rows.mc.readColumns(resLen)
+ return err
+}
+
+func (rows *textRows) Next(dest []driver.Value) error {
+ if mc := rows.mc; mc != nil {
+ if err := mc.error(); err != nil {
+ return err
+ }
+
+ // Fetch next row from stream
+ return rows.readRow(dest)
+ }
+ return io.EOF
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go
new file mode 100644
index 0000000..18a3ae4
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/statement.go
@@ -0,0 +1,220 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql/driver"
+ "encoding/json"
+ "fmt"
+ "io"
+ "reflect"
+)
+
+type mysqlStmt struct {
+ mc *mysqlConn
+ id uint32
+ paramCount int
+}
+
+func (stmt *mysqlStmt) Close() error {
+ if stmt.mc == nil || stmt.mc.closed.IsSet() {
+ // driver.Stmt.Close can be called more than once, thus this function
+ // has to be idempotent.
+ // See also Issue #450 and golang/go#16019.
+ //errLog.Print(ErrInvalidConn)
+ return driver.ErrBadConn
+ }
+
+ err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id)
+ stmt.mc = nil
+ return err
+}
+
+func (stmt *mysqlStmt) NumInput() int {
+ return stmt.paramCount
+}
+
+func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter {
+ return converter{}
+}
+
+func (stmt *mysqlStmt) CheckNamedValue(nv *driver.NamedValue) (err error) {
+ nv.Value, err = converter{}.ConvertValue(nv.Value)
+ return
+}
+
+func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
+ if stmt.mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ // Send command
+ err := stmt.writeExecutePacket(args)
+ if err != nil {
+ return nil, stmt.mc.markBadConn(err)
+ }
+
+ mc := stmt.mc
+
+ mc.affectedRows = 0
+ mc.insertId = 0
+
+ // Read Result
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err != nil {
+ return nil, err
+ }
+
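+ // Exec discards any rows the statement produced: the column and row
+ // packets must still be read off the wire to keep the connection in sync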
+ if resLen > 0 {
+ // Columns
+ if err = mc.readUntilEOF(); err != nil {
+ return nil, err
+ }
+
+ // Rows
+ if err := mc.readUntilEOF(); err != nil {
+ return nil, err
+ }
+ }
+
+ if err := mc.discardResults(); err != nil {
+ return nil, err
+ }
+
+ return &mysqlResult{
+ affectedRows: int64(mc.affectedRows),
+ insertId: int64(mc.insertId),
+ }, nil
+}
+
+func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
+ return stmt.query(args)
+}
+
+func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
+ if stmt.mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return nil, driver.ErrBadConn
+ }
+ // Send command
+ err := stmt.writeExecutePacket(args)
+ if err != nil {
+ return nil, stmt.mc.markBadConn(err)
+ }
+
+ mc := stmt.mc
+
+ // Read Result
+ resLen, err := mc.readResultSetHeaderPacket()
+ if err != nil {
+ return nil, err
+ }
+
+ rows := new(binaryRows)
+
+ if resLen > 0 {
+ rows.mc = mc
+ rows.rs.columns, err = mc.readColumns(resLen)
+ } else {
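+ // no columns in this result set; it may still be followed by further
+ // result sets, e.g. when calling a stored procedure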
+ rows.rs.done = true
+
+ switch err := rows.NextResultSet(); err {
+ case nil, io.EOF:
+ return rows, nil
+ default:
+ return nil, err
+ }
+ }
+
+ return rows, err
+}
+
+var jsonType = reflect.TypeOf(json.RawMessage{})
+
+type converter struct{}
+
+// ConvertValue mirrors the reference/default converter in database/sql/driver
+// with _one_ exception: we support uint64 values with their high bit set,
+// which the default implementation does not. This function should be kept in sync with
+// database/sql/driver defaultConverter.ConvertValue() except for that
+// deliberate difference.
+func (c converter) ConvertValue(v interface{}) (driver.Value, error) {
+ if driver.IsValue(v) {
+ return v, nil
+ }
+
+ if vr, ok := v.(driver.Valuer); ok {
+ sv, err := callValuerValue(vr)
+ if err != nil {
+ return nil, err
+ }
+ if driver.IsValue(sv) {
+ return sv, nil
+ }
+ // A value returned from the Valuer interface can be "a type handled by
+ // a database driver's NamedValueChecker interface" so we should accept
+ // uint64 here as well.
+ if u, ok := sv.(uint64); ok {
+ return u, nil
+ }
+ return nil, fmt.Errorf("non-Value type %T returned from Value", sv)
+ }
+ rv := reflect.ValueOf(v)
+ switch rv.Kind() {
+ case reflect.Ptr:
+ // indirect pointers
+ if rv.IsNil() {
+ return nil, nil
+ } else {
+ return c.ConvertValue(rv.Elem().Interface())
+ }
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return rv.Int(), nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return rv.Uint(), nil
+ case reflect.Float32, reflect.Float64:
+ return rv.Float(), nil
+ case reflect.Bool:
+ return rv.Bool(), nil
+ case reflect.Slice:
+ switch t := rv.Type(); {
+ case t == jsonType:
+ return v, nil
+ case t.Elem().Kind() == reflect.Uint8:
+ return rv.Bytes(), nil
+ default:
+ return nil, fmt.Errorf("unsupported type %T, a slice of %s", v, t.Elem().Kind())
+ }
+ case reflect.String:
+ return rv.String(), nil
+ }
+ return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind())
+}
+
+var valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
+
+// callValuerValue returns vr.Value(), with one exception:
+// If vr.Value is an auto-generated method on a pointer type and the
+// pointer is nil, it would panic at runtime in the panicwrap
+// method. Treat it like nil instead.
+//
+// This is so people can implement driver.Value on value types and
+// still use nil pointers to those types to mean nil/NULL, just like
+// string/*string.
+//
+// This is an exact copy of the same-named unexported function from the
+// database/sql package.
+func callValuerValue(vr driver.Valuer) (v driver.Value, err error) {
+ if rv := reflect.ValueOf(vr); rv.Kind() == reflect.Ptr &&
+ rv.IsNil() &&
+ rv.Type().Elem().Implements(valuerReflectType) {
+ return nil, nil
+ }
+ return vr.Value()
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/transaction.go b/vendor/github.com/go-sql-driver/mysql/transaction.go
new file mode 100644
index 0000000..417d727
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/transaction.go
@@ -0,0 +1,31 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+type mysqlTx struct {
+ mc *mysqlConn
+}
+
+func (tx *mysqlTx) Commit() (err error) {
+ if tx.mc == nil || tx.mc.closed.IsSet() {
+ return ErrInvalidConn
+ }
+ err = tx.mc.exec("COMMIT")
+ tx.mc = nil
+ return
+}
+
+func (tx *mysqlTx) Rollback() (err error) {
+ if tx.mc == nil || tx.mc.closed.IsSet() {
+ return ErrInvalidConn
+ }
+ err = tx.mc.exec("ROLLBACK")
+ tx.mc = nil
+ return
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go
new file mode 100644
index 0000000..d6545f5
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/utils.go
@@ -0,0 +1,868 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "crypto/tls"
+ "database/sql"
+ "database/sql/driver"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// Registry for custom tls.Configs
+var (
+ tlsConfigLock sync.RWMutex
+ tlsConfigRegistry map[string]*tls.Config
+)
+
+// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open.
+// Use the key as a value in the DSN where tls=value.
+//
+// Note: The provided tls.Config is exclusively owned by the driver after
+// registering it.
+//
+// rootCertPool := x509.NewCertPool()
+// pem, err := ioutil.ReadFile("/path/ca-cert.pem")
+// if err != nil {
+// log.Fatal(err)
+// }
+// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
+// log.Fatal("Failed to append PEM.")
+// }
+// clientCert := make([]tls.Certificate, 0, 1)
+// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem")
+// if err != nil {
+// log.Fatal(err)
+// }
+// clientCert = append(clientCert, certs)
+// mysql.RegisterTLSConfig("custom", &tls.Config{
+// RootCAs: rootCertPool,
+// Certificates: clientCert,
+// })
+// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom")
+//
+func RegisterTLSConfig(key string, config *tls.Config) error {
+ if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" || strings.ToLower(key) == "preferred" {
+ return fmt.Errorf("key '%s' is reserved", key)
+ }
+
+ tlsConfigLock.Lock()
+ if tlsConfigRegistry == nil {
+ tlsConfigRegistry = make(map[string]*tls.Config)
+ }
+
+ tlsConfigRegistry[key] = config
+ tlsConfigLock.Unlock()
+ return nil
+}
+
+// DeregisterTLSConfig removes the tls.Config associated with key.
+func DeregisterTLSConfig(key string) {
+ tlsConfigLock.Lock()
+ if tlsConfigRegistry != nil {
+ delete(tlsConfigRegistry, key)
+ }
+ tlsConfigLock.Unlock()
+}
+
+func getTLSConfigClone(key string) (config *tls.Config) {
+ tlsConfigLock.RLock()
+ if v, ok := tlsConfigRegistry[key]; ok {
+ config = v.Clone()
+ }
+ tlsConfigLock.RUnlock()
+ return
+}
+
+// Returns the bool value of the input.
+// The second return value indicates whether the input was a valid bool value.
+func readBool(input string) (value bool, valid bool) {
+ switch input {
+ case "1", "true", "TRUE", "True":
+ return true, true
+ case "0", "false", "FALSE", "False":
+ return false, true
+ }
+
+ // Not a valid bool value
+ return
+}
+
+/******************************************************************************
+* Time related utils *
+******************************************************************************/
+
+func parseDateTime(b []byte, loc *time.Location) (time.Time, error) {
+ const base = "0000-00-00 00:00:00.000000"
+ switch len(b) {
+ case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM"
+ if string(b) == base[:len(b)] {
+ return time.Time{}, nil
+ }
+
+ year, err := parseByteYear(b)
+ if err != nil {
+ return time.Time{}, err
+ }
+ if year <= 0 {
+ year = 1
+ }
+
+ if b[4] != '-' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[4])
+ }
+
+ m, err := parseByte2Digits(b[5], b[6])
+ if err != nil {
+ return time.Time{}, err
+ }
+ if m <= 0 {
+ m = 1
+ }
+ month := time.Month(m)
+
+ if b[7] != '-' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[7])
+ }
+
+ day, err := parseByte2Digits(b[8], b[9])
+ if err != nil {
+ return time.Time{}, err
+ }
+ if day <= 0 {
+ day = 1
+ }
+ if len(b) == 10 {
+ return time.Date(year, month, day, 0, 0, 0, 0, loc), nil
+ }
+
+ if b[10] != ' ' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[10])
+ }
+
+ hour, err := parseByte2Digits(b[11], b[12])
+ if err != nil {
+ return time.Time{}, err
+ }
+ if b[13] != ':' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[13])
+ }
+
+ min, err := parseByte2Digits(b[14], b[15])
+ if err != nil {
+ return time.Time{}, err
+ }
+ if b[16] != ':' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[16])
+ }
+
+ sec, err := parseByte2Digits(b[17], b[18])
+ if err != nil {
+ return time.Time{}, err
+ }
+ if len(b) == 19 {
+ return time.Date(year, month, day, hour, min, sec, 0, loc), nil
+ }
+
+ if b[19] != '.' {
+ return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[19])
+ }
+ nsec, err := parseByteNanoSec(b[20:])
+ if err != nil {
+ return time.Time{}, err
+ }
+ return time.Date(year, month, day, hour, min, sec, nsec, loc), nil
+ default:
+ return time.Time{}, fmt.Errorf("invalid time bytes: %s", b)
+ }
+}
+
+func parseByteYear(b []byte) (int, error) {
+ year, n := 0, 1000
+ for i := 0; i < 4; i++ {
+ v, err := bToi(b[i])
+ if err != nil {
+ return 0, err
+ }
+ year += v * n
+ n = n / 10
+ }
+ return year, nil
+}
+
+func parseByte2Digits(b1, b2 byte) (int, error) {
+ d1, err := bToi(b1)
+ if err != nil {
+ return 0, err
+ }
+ d2, err := bToi(b2)
+ if err != nil {
+ return 0, err
+ }
+ return d1*10 + d2, nil
+}
+
+func parseByteNanoSec(b []byte) (int, error) {
+ ns, digit := 0, 100000 // max is 6-digits
+ for i := 0; i < len(b); i++ {
+ v, err := bToi(b[i])
+ if err != nil {
+ return 0, err
+ }
+ ns += v * digit
+ digit /= 10
+ }
+ // a nanosecond value has 9 digits, but at most 6 were parsed above,
+ // so scale the result by 10^(9-6) = 1000.
+ return ns * 1000, nil
+}
+
+func bToi(b byte) (int, error) {
+ if b < '0' || b > '9' {
+ return 0, errors.New("not [0-9]")
+ }
+ return int(b - '0'), nil
+}
+
+func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) {
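+ // the binary protocol encodes DATETIME with a payload length of 0 (zero
+ // value), 4 (date only), 7 (date and time) or 11 (with microseconds)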
+ switch num {
+ case 0:
+ return time.Time{}, nil
+ case 4:
+ return time.Date(
+ int(binary.LittleEndian.Uint16(data[:2])), // year
+ time.Month(data[2]), // month
+ int(data[3]), // day
+ 0, 0, 0, 0,
+ loc,
+ ), nil
+ case 7:
+ return time.Date(
+ int(binary.LittleEndian.Uint16(data[:2])), // year
+ time.Month(data[2]), // month
+ int(data[3]), // day
+ int(data[4]), // hour
+ int(data[5]), // minutes
+ int(data[6]), // seconds
+ 0,
+ loc,
+ ), nil
+ case 11:
+ return time.Date(
+ int(binary.LittleEndian.Uint16(data[:2])), // year
+ time.Month(data[2]), // month
+ int(data[3]), // day
+ int(data[4]), // hour
+ int(data[5]), // minutes
+ int(data[6]), // seconds
+ int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds
+ loc,
+ ), nil
+ }
+ return nil, fmt.Errorf("invalid DATETIME packet length %d", num)
+}
+
+func appendDateTime(buf []byte, t time.Time) ([]byte, error) {
+ year, month, day := t.Date()
+ hour, min, sec := t.Clock()
+ nsec := t.Nanosecond()
+
+ if year < 1 || year > 9999 {
+ return buf, errors.New("year is not in the range [1, 9999]: " + strconv.Itoa(year)) // use errors.New instead of fmt.Errorf to avoid year escape to heap
+ }
+ year100 := year / 100
+ year1 := year % 100
+
+ var localBuf [len("2006-01-02T15:04:05.999999999")]byte // does not escape
+ localBuf[0], localBuf[1], localBuf[2], localBuf[3] = digits10[year100], digits01[year100], digits10[year1], digits01[year1]
+ localBuf[4] = '-'
+ localBuf[5], localBuf[6] = digits10[month], digits01[month]
+ localBuf[7] = '-'
+ localBuf[8], localBuf[9] = digits10[day], digits01[day]
+
+ if hour == 0 && min == 0 && sec == 0 && nsec == 0 {
+ return append(buf, localBuf[:10]...), nil
+ }
+
+ localBuf[10] = ' '
+ localBuf[11], localBuf[12] = digits10[hour], digits01[hour]
+ localBuf[13] = ':'
+ localBuf[14], localBuf[15] = digits10[min], digits01[min]
+ localBuf[16] = ':'
+ localBuf[17], localBuf[18] = digits10[sec], digits01[sec]
+
+ if nsec == 0 {
+ return append(buf, localBuf[:19]...), nil
+ }
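+ // split the nanosecond value into 2-digit groups so each pair can be
+ // rendered via the digits10/digits01 lookup tables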
+ nsec100000000 := nsec / 100000000
+ nsec1000000 := (nsec / 1000000) % 100
+ nsec10000 := (nsec / 10000) % 100
+ nsec100 := (nsec / 100) % 100
+ nsec1 := nsec % 100
+ localBuf[19] = '.'
+
+ // millisecond digits
+ localBuf[20], localBuf[21], localBuf[22] =
+ digits01[nsec100000000], digits10[nsec1000000], digits01[nsec1000000]
+ // microsecond digits
+ localBuf[23], localBuf[24], localBuf[25] =
+ digits10[nsec10000], digits01[nsec10000], digits10[nsec100]
+ // nanosecond digits
+ localBuf[26], localBuf[27], localBuf[28] =
+ digits01[nsec100], digits10[nsec1], digits01[nsec1]
+
+ // trim trailing zeros
+ n := len(localBuf)
+ for n > 0 && localBuf[n-1] == '0' {
+ n--
+ }
+
+ return append(buf, localBuf[:n]...), nil
+}
+
+// zeroDateTime is used in formatBinaryDateTime to avoid an allocation
+// if the DATE or DATETIME has the zero value.
+// It must never be changed.
+// The current behavior depends on database/sql copying the result.
+var zeroDateTime = []byte("0000-00-00 00:00:00.000000")
+
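+// digits10[i] and digits01[i] are the tens and ones digits of i (0..99),
+// allowing two-digit numbers to be formatted without strconv.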
+const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
+const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999"
+
+func appendMicrosecs(dst, src []byte, decimals int) []byte {
+ if decimals <= 0 {
+ return dst
+ }
+ if len(src) == 0 {
+ return append(dst, ".000000"[:decimals+1]...)
+ }
+
+ microsecs := binary.LittleEndian.Uint32(src[:4])
+ p1 := byte(microsecs / 10000)
+ microsecs -= 10000 * uint32(p1)
+ p2 := byte(microsecs / 100)
+ microsecs -= 100 * uint32(p2)
+ p3 := byte(microsecs)
+
+ switch decimals {
+ default:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2], digits01[p2],
+ digits10[p3], digits01[p3],
+ )
+ case 1:
+ return append(dst, '.',
+ digits10[p1],
+ )
+ case 2:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ )
+ case 3:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2],
+ )
+ case 4:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2], digits01[p2],
+ )
+ case 5:
+ return append(dst, '.',
+ digits10[p1], digits01[p1],
+ digits10[p2], digits01[p2],
+ digits10[p3],
+ )
+ }
+}
+
+func formatBinaryDateTime(src []byte, length uint8) (driver.Value, error) {
+ // length expects the deterministic length of the zero value;
+ // a negative sign and 100+ hours are handled automatically if needed
+ if len(src) == 0 {
+ return zeroDateTime[:length], nil
+ }
+ var dst []byte // return value
+ var p1, p2, p3 byte // current digit pair
+
+ switch length {
+ case 10, 19, 21, 22, 23, 24, 25, 26:
+ default:
+ t := "DATE"
+ if length > 10 {
+ t += "TIME"
+ }
+ return nil, fmt.Errorf("illegal %s length %d", t, length)
+ }
+ switch len(src) {
+ case 4, 7, 11:
+ default:
+ t := "DATE"
+ if length > 10 {
+ t += "TIME"
+ }
+ return nil, fmt.Errorf("illegal %s packet length %d", t, len(src))
+ }
+ dst = make([]byte, 0, length)
+ // start with the date
+ year := binary.LittleEndian.Uint16(src[:2])
+ pt := year / 100
+ p1 = byte(year - 100*uint16(pt))
+ p2, p3 = src[2], src[3]
+ dst = append(dst,
+ digits10[pt], digits01[pt],
+ digits10[p1], digits01[p1], '-',
+ digits10[p2], digits01[p2], '-',
+ digits10[p3], digits01[p3],
+ )
+ if length == 10 {
+ return dst, nil
+ }
+ if len(src) == 4 {
+ return append(dst, zeroDateTime[10:length]...), nil
+ }
+ dst = append(dst, ' ')
+ p1 = src[4] // hour
+ src = src[5:]
+
+ // p1 is 2-digit hour, src is after hour
+ p2, p3 = src[0], src[1]
+ dst = append(dst,
+ digits10[p1], digits01[p1], ':',
+ digits10[p2], digits01[p2], ':',
+ digits10[p3], digits01[p3],
+ )
+ return appendMicrosecs(dst, src[2:], int(length)-20), nil
+}
+
+func formatBinaryTime(src []byte, length uint8) (driver.Value, error) {
+ // length expects the deterministic length of the zero value;
+ // a negative sign and 100+ hours are handled automatically if needed
+ if len(src) == 0 {
+ return zeroDateTime[11 : 11+length], nil
+ }
+ var dst []byte // return value
+
+ switch length {
+ case
+ 8, // time (can be up to 10 when negative and 100+ hours)
+ 10, 11, 12, 13, 14, 15: // time with fractional seconds
+ default:
+ return nil, fmt.Errorf("illegal TIME length %d", length)
+ }
+ switch len(src) {
+ case 8, 12:
+ default:
+ return nil, fmt.Errorf("invalid TIME packet length %d", len(src))
+ }
+ // +2 to enable negative time and 100+ hours
+ dst = make([]byte, 0, length+2)
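+ // the first byte of a binary TIME payload is the sign flag (1 = negative)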
+ if src[0] == 1 {
+ dst = append(dst, '-')
+ }
+ days := binary.LittleEndian.Uint32(src[1:5])
+ hours := int64(days)*24 + int64(src[5])
+
+ if hours >= 100 {
+ dst = strconv.AppendInt(dst, hours, 10)
+ } else {
+ dst = append(dst, digits10[hours], digits01[hours])
+ }
+
+ min, sec := src[6], src[7]
+ dst = append(dst, ':',
+ digits10[min], digits01[min], ':',
+ digits10[sec], digits01[sec],
+ )
+ return appendMicrosecs(dst, src[8:], int(length)-9), nil
+}
+
+/******************************************************************************
+* Convert from and to bytes *
+******************************************************************************/
+
+func uint64ToBytes(n uint64) []byte {
+ return []byte{
+ byte(n),
+ byte(n >> 8),
+ byte(n >> 16),
+ byte(n >> 24),
+ byte(n >> 32),
+ byte(n >> 40),
+ byte(n >> 48),
+ byte(n >> 56),
+ }
+}
+
+func uint64ToString(n uint64) []byte {
+ var a [20]byte
+ i := 20
+
+ // U+0030 = 0
+ // ...
+ // U+0039 = 9
+
+ var q uint64
+ for n >= 10 {
+ i--
+ q = n / 10
+ a[i] = uint8(n-q*10) + 0x30
+ n = q
+ }
+
+ i--
+ a[i] = uint8(n) + 0x30
+
+ return a[i:]
+}
+
+// treats string value as unsigned integer representation
+func stringToInt(b []byte) int {
+ val := 0
+ for i := range b {
+ val *= 10
+ val += int(b[i] - 0x30)
+ }
+ return val
+}
+
+// returns the string read as a bytes slice, wheter the value is NULL,
+// the number of bytes read and an error, in case the string is longer than
+// the input slice
+func readLengthEncodedString(b []byte) ([]byte, bool, int, error) {
+ // Get length
+ num, isNull, n := readLengthEncodedInteger(b)
+ if num < 1 {
+ return b[n:n], isNull, n, nil
+ }
+
+ n += int(num)
+
+ // Check data length
+ if len(b) >= n {
+ return b[n-int(num) : n : n], false, n, nil
+ }
+ return nil, false, n, io.EOF
+}
+
+// returns the number of bytes skipped and an error, in case the string is
+// longer than the input slice
+func skipLengthEncodedString(b []byte) (int, error) {
+ // Get length
+ num, _, n := readLengthEncodedInteger(b)
+ if num < 1 {
+ return n, nil
+ }
+
+ n += int(num)
+
+ // Check data length
+ if len(b) >= n {
+ return n, nil
+ }
+ return n, io.EOF
+}
+
+// returns the number read, whether the value is NULL and the number of bytes read
+func readLengthEncodedInteger(b []byte) (uint64, bool, int) {
+ // See issue #349
+ if len(b) == 0 {
+ return 0, true, 1
+ }
+
+ switch b[0] {
+ // 251: NULL
+ case 0xfb:
+ return 0, true, 1
+
+ // 252: value of following 2
+ case 0xfc:
+ return uint64(b[1]) | uint64(b[2])<<8, false, 3
+
+ // 253: value of following 3
+ case 0xfd:
+ return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4
+
+ // 254: value of following 8
+ case 0xfe:
+ return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 |
+ uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 |
+ uint64(b[7])<<48 | uint64(b[8])<<56,
+ false, 9
+ }
+
+ // 0-250: value of first byte
+ return uint64(b[0]), false, 1
+}
+
+// encodes a uint64 value and appends it to the given bytes slice
+func appendLengthEncodedInteger(b []byte, n uint64) []byte {
+ switch {
+ case n <= 250:
+ return append(b, byte(n))
+
+ case n <= 0xffff:
+ return append(b, 0xfc, byte(n), byte(n>>8))
+
+ case n <= 0xffffff:
+ return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16))
+ }
+ return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24),
+ byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56))
+}
+
+// reserveBuffer checks cap(buf) and expands the buffer to len(buf) + appendSize.
+// If cap(buf) is not enough, it reallocates a new buffer.
+func reserveBuffer(buf []byte, appendSize int) []byte {
+ newSize := len(buf) + appendSize
+ if cap(buf) < newSize {
+ // Grow buffer exponentially
+ newBuf := make([]byte, len(buf)*2+appendSize)
+ copy(newBuf, buf)
+ buf = newBuf
+ }
+ return buf[:newSize]
+}
+
+// escapeBytesBackslash escapes []byte with backslashes (\)
+// This escapes the contents of a string (provided as []byte) by adding backslashes before special
+// characters, and turning others into specific escape sequences, such as
+// turning newlines into \n and null bytes into \0.
+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932
+func escapeBytesBackslash(buf, v []byte) []byte {
+ pos := len(buf)
+ buf = reserveBuffer(buf, len(v)*2)
+
+ for _, c := range v {
+ switch c {
+ case '\x00':
+ buf[pos] = '\\'
+ buf[pos+1] = '0'
+ pos += 2
+ case '\n':
+ buf[pos] = '\\'
+ buf[pos+1] = 'n'
+ pos += 2
+ case '\r':
+ buf[pos] = '\\'
+ buf[pos+1] = 'r'
+ pos += 2
+ case '\x1a':
+ buf[pos] = '\\'
+ buf[pos+1] = 'Z'
+ pos += 2
+ case '\'':
+ buf[pos] = '\\'
+ buf[pos+1] = '\''
+ pos += 2
+ case '"':
+ buf[pos] = '\\'
+ buf[pos+1] = '"'
+ pos += 2
+ case '\\':
+ buf[pos] = '\\'
+ buf[pos+1] = '\\'
+ pos += 2
+ default:
+ buf[pos] = c
+ pos++
+ }
+ }
+
+ return buf[:pos]
+}
+
+// escapeStringBackslash is similar to escapeBytesBackslash but for string.
+func escapeStringBackslash(buf []byte, v string) []byte {
+ pos := len(buf)
+ buf = reserveBuffer(buf, len(v)*2)
+
+ for i := 0; i < len(v); i++ {
+ c := v[i]
+ switch c {
+ case '\x00':
+ buf[pos] = '\\'
+ buf[pos+1] = '0'
+ pos += 2
+ case '\n':
+ buf[pos] = '\\'
+ buf[pos+1] = 'n'
+ pos += 2
+ case '\r':
+ buf[pos] = '\\'
+ buf[pos+1] = 'r'
+ pos += 2
+ case '\x1a':
+ buf[pos] = '\\'
+ buf[pos+1] = 'Z'
+ pos += 2
+ case '\'':
+ buf[pos] = '\\'
+ buf[pos+1] = '\''
+ pos += 2
+ case '"':
+ buf[pos] = '\\'
+ buf[pos+1] = '"'
+ pos += 2
+ case '\\':
+ buf[pos] = '\\'
+ buf[pos+1] = '\\'
+ pos += 2
+ default:
+ buf[pos] = c
+ pos++
+ }
+ }
+
+ return buf[:pos]
+}
+
+// escapeBytesQuotes escapes apostrophes in []byte by doubling them up.
+// This escapes the contents of a string by doubling up any apostrophes that
+// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in
+// effect on the server.
+// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038
+func escapeBytesQuotes(buf, v []byte) []byte {
+ pos := len(buf)
+ buf = reserveBuffer(buf, len(v)*2)
+
+ for _, c := range v {
+ if c == '\'' {
+ buf[pos] = '\''
+ buf[pos+1] = '\''
+ pos += 2
+ } else {
+ buf[pos] = c
+ pos++
+ }
+ }
+
+ return buf[:pos]
+}
+
+// escapeStringQuotes is similar to escapeBytesQuotes but for string.
+func escapeStringQuotes(buf []byte, v string) []byte {
+ pos := len(buf)
+ buf = reserveBuffer(buf, len(v)*2)
+
+ for i := 0; i < len(v); i++ {
+ c := v[i]
+ if c == '\'' {
+ buf[pos] = '\''
+ buf[pos+1] = '\''
+ pos += 2
+ } else {
+ buf[pos] = c
+ pos++
+ }
+ }
+
+ return buf[:pos]
+}
+
+/******************************************************************************
+* Sync utils *
+******************************************************************************/
+
+// noCopy may be embedded into structs which must not be copied
+// after the first use.
+//
+// See https://github.com/golang/go/issues/8005#issuecomment-190753527
+// for details.
+type noCopy struct{}
+
+// Lock is a no-op used by -copylocks checker from `go vet`.
+func (*noCopy) Lock() {}
+
+// atomicBool is a wrapper around uint32 for usage as a boolean value with
+// atomic access.
+type atomicBool struct {
+ _noCopy noCopy
+ value uint32
+}
+
+// IsSet returns whether the current boolean value is true
+func (ab *atomicBool) IsSet() bool {
+ return atomic.LoadUint32(&ab.value) > 0
+}
+
+// Set sets the value of the bool regardless of the previous value
+func (ab *atomicBool) Set(value bool) {
+ if value {
+ atomic.StoreUint32(&ab.value, 1)
+ } else {
+ atomic.StoreUint32(&ab.value, 0)
+ }
+}
+
+// TrySet sets the value of the bool and returns whether the value changed
+func (ab *atomicBool) TrySet(value bool) bool {
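+ // atomic.SwapUint32 returns the previous value, so the value changed
+ // exactly when the old and new values differ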
+ if value {
+ return atomic.SwapUint32(&ab.value, 1) == 0
+ }
+ return atomic.SwapUint32(&ab.value, 0) > 0
+}
+
+// atomicError is a wrapper for atomically accessed error values
+type atomicError struct {
+ _noCopy noCopy
+ value atomic.Value
+}
+
+// Set sets the error value regardless of the previous value.
+// The value must not be nil
+func (ae *atomicError) Set(value error) {
+ ae.value.Store(value)
+}
+
+// Value returns the current error value
+func (ae *atomicError) Value() error {
+ if v := ae.value.Load(); v != nil {
+ // this will panic if the value doesn't implement the error interface
+ return v.(error)
+ }
+ return nil
+}
+
+func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
+ dargs := make([]driver.Value, len(named))
+ for n, param := range named {
+ if len(param.Name) > 0 {
+ // TODO: support the use of Named Parameters #561
+ return nil, errors.New("mysql: driver does not support the use of Named Parameters")
+ }
+ dargs[n] = param.Value
+ }
+ return dargs, nil
+}
+
+func mapIsolationLevel(level driver.IsolationLevel) (string, error) {
+ switch sql.IsolationLevel(level) {
+ case sql.LevelRepeatableRead:
+ return "REPEATABLE READ", nil
+ case sql.LevelReadCommitted:
+ return "READ COMMITTED", nil
+ case sql.LevelReadUncommitted:
+ return "READ UNCOMMITTED", nil
+ case sql.LevelSerializable:
+ return "SERIALIZABLE", nil
+ default:
+ return "", fmt.Errorf("mysql: unsupported isolation level: %v", level)
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS
new file mode 100644
index 0000000..15167cd
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 0000000..0f64693
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,28 @@
+Copyright 2010 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go
new file mode 100644
index 0000000..e810e6f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/buffer.go
@@ -0,0 +1,324 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+
+ "google.golang.org/protobuf/encoding/prototext"
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ WireVarint = 0
+ WireFixed32 = 5
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+)
+
+// EncodeVarint returns the varint encoded bytes of v.
+func EncodeVarint(v uint64) []byte {
+ return protowire.AppendVarint(nil, v)
+}
+
+// SizeVarint returns the length of the varint encoded bytes of v.
+// This is equal to len(EncodeVarint(v)).
+func SizeVarint(v uint64) int {
+ return protowire.SizeVarint(v)
+}
+
+// DecodeVarint parses a varint encoded integer from b,
+// returning the integer value and the length of the varint.
+// It returns (0, 0) if there is a parse error.
+func DecodeVarint(b []byte) (uint64, int) {
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return 0, 0
+ }
+ return v, n
+}
+
+// Buffer is a buffer for encoding and decoding the protobuf wire format.
+// It may be reused between invocations to reduce memory usage.
+type Buffer struct {
+ buf []byte
+ idx int
+ deterministic bool
+}
+
+// NewBuffer allocates a new Buffer initialized with buf,
+// where the contents of buf are considered the unread portion of the buffer.
+func NewBuffer(buf []byte) *Buffer {
+ return &Buffer{buf: buf}
+}
+
+// SetDeterministic specifies whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+// - Repeated serialization of a message will return the same bytes.
+// - Different processes of the same binary (which may be executing on
+// different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographical order. This is an implementation detail and
+// subject to change.
+func (b *Buffer) SetDeterministic(deterministic bool) {
+ b.deterministic = deterministic
+}
+
+// SetBuf sets buf as the internal buffer,
+// where the contents of buf are considered the unread portion of the buffer.
+func (b *Buffer) SetBuf(buf []byte) {
+ b.buf = buf
+ b.idx = 0
+}
+
+// Reset clears the internal buffer of all written and unread data.
+func (b *Buffer) Reset() {
+ b.buf = b.buf[:0]
+ b.idx = 0
+}
+
+// Bytes returns the internal buffer.
+func (b *Buffer) Bytes() []byte {
+ return b.buf
+}
+
+// Unread returns the unread portion of the buffer.
+func (b *Buffer) Unread() []byte {
+ return b.buf[b.idx:]
+}
+
+// Marshal appends the wire-format encoding of m to the buffer.
+func (b *Buffer) Marshal(m Message) error {
+ var err error
+ b.buf, err = marshalAppend(b.buf, m, b.deterministic)
+ return err
+}
+
+// Unmarshal parses the wire-format message in the buffer and
+// places the decoded results in m.
+// It does not reset m before unmarshaling.
+func (b *Buffer) Unmarshal(m Message) error {
+ err := UnmarshalMerge(b.Unread(), m)
+ b.idx = len(b.buf)
+ return err
+}
+
+type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }
+
+func (m *unknownFields) String() string { panic("not implemented") }
+func (m *unknownFields) Reset() { panic("not implemented") }
+func (m *unknownFields) ProtoMessage() { panic("not implemented") }
+
+// DebugPrint dumps the encoded bytes of b with a header and footer including s
+// to stdout. This is only intended for debugging.
+func (*Buffer) DebugPrint(s string, b []byte) {
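+ // attach the raw bytes as unknown fields so prototext renders them in a
+ // generic tag/value form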
+ m := MessageReflect(new(unknownFields))
+ m.SetUnknown(b)
+ b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
+ fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
+}
+
+// EncodeVarint appends an unsigned varint encoding to the buffer.
+func (b *Buffer) EncodeVarint(v uint64) error {
+ b.buf = protowire.AppendVarint(b.buf, v)
+ return nil
+}
+
+// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
+func (b *Buffer) EncodeZigzag32(v uint64) error {
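+ // zig-zag encoding maps signed values to unsigned ones so that small
+ // negative numbers get small varint encodings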
+ return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+}
+
+// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
+func (b *Buffer) EncodeZigzag64(v uint64) error {
+ return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
+}
+
+// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
+func (b *Buffer) EncodeFixed32(v uint64) error {
+ b.buf = protowire.AppendFixed32(b.buf, uint32(v))
+ return nil
+}
+
+// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
+func (b *Buffer) EncodeFixed64(v uint64) error {
+ b.buf = protowire.AppendFixed64(b.buf, uint64(v))
+ return nil
+}
+
+// EncodeRawBytes appends length-prefixed raw bytes to the buffer.
+func (b *Buffer) EncodeRawBytes(v []byte) error {
+ b.buf = protowire.AppendBytes(b.buf, v)
+ return nil
+}
+
+// EncodeStringBytes appends length-prefixed raw bytes to the buffer.
+// It does not validate whether v contains valid UTF-8.
+func (b *Buffer) EncodeStringBytes(v string) error {
+ b.buf = protowire.AppendString(b.buf, v)
+ return nil
+}
+
+// EncodeMessage appends a length-prefixed encoded message to the buffer.
+func (b *Buffer) EncodeMessage(m Message) error {
+ var err error
+ b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
+ b.buf, err = marshalAppend(b.buf, m, b.deterministic)
+ return err
+}
+
+// DecodeVarint consumes an encoded unsigned varint from the buffer.
+func (b *Buffer) DecodeVarint() (uint64, error) {
+ v, n := protowire.ConsumeVarint(b.buf[b.idx:])
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ b.idx += n
+ return uint64(v), nil
+}
+
+// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
+func (b *Buffer) DecodeZigzag32() (uint64, error) {
+ v, err := b.DecodeVarint()
+ if err != nil {
+ return 0, err
+ }
+ return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
+}
+
+// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
+func (b *Buffer) DecodeZigzag64() (uint64, error) {
+ v, err := b.DecodeVarint()
+ if err != nil {
+ return 0, err
+ }
+ return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
+}
+
+// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
+func (b *Buffer) DecodeFixed32() (uint64, error) {
+ v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ b.idx += n
+ return uint64(v), nil
+}
+
+// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
+func (b *Buffer) DecodeFixed64() (uint64, error) {
+ v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
+ if n < 0 {
+ return 0, protowire.ParseError(n)
+ }
+ b.idx += n
+ return uint64(v), nil
+}
+
+// DecodeRawBytes consumes length-prefixed raw bytes from the buffer.
+// If alloc is specified, it returns a copy of the raw bytes
+// rather than a sub-slice of the buffer.
+func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
+ v, n := protowire.ConsumeBytes(b.buf[b.idx:])
+ if n < 0 {
+ return nil, protowire.ParseError(n)
+ }
+ b.idx += n
+ if alloc {
+ v = append([]byte(nil), v...)
+ }
+ return v, nil
+}
+
+// DecodeStringBytes consumes length-prefixed raw bytes from the buffer.
+// It does not validate whether the raw bytes contain valid UTF-8.
+func (b *Buffer) DecodeStringBytes() (string, error) {
+ v, n := protowire.ConsumeString(b.buf[b.idx:])
+ if n < 0 {
+ return "", protowire.ParseError(n)
+ }
+ b.idx += n
+ return v, nil
+}
+
+// DecodeMessage consumes a length-prefixed message from the buffer.
+// It does not reset m before unmarshaling.
+func (b *Buffer) DecodeMessage(m Message) error {
+ v, err := b.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return UnmarshalMerge(v, m)
+}
+
+// DecodeGroup consumes a message group from the buffer.
+// It assumes that the start group marker has already been consumed and
+// consumes all bytes until (and including) the end group marker.
+// It does not reset m before unmarshaling.
+func (b *Buffer) DecodeGroup(m Message) error {
+ v, n, err := consumeGroup(b.buf[b.idx:])
+ if err != nil {
+ return err
+ }
+ b.idx += n
+ return UnmarshalMerge(v, m)
+}
+
+// consumeGroup parses b until it finds an end group marker, returning
+// the raw bytes of the message (excluding the end group marker) and
+// the total length of the message (including the end group marker).
+func consumeGroup(b []byte) ([]byte, int, error) {
+ b0 := b
+ depth := 1 // assume this follows a start group marker
+ for {
+ _, wtyp, tagLen := protowire.ConsumeTag(b)
+ if tagLen < 0 {
+ return nil, 0, protowire.ParseError(tagLen)
+ }
+ b = b[tagLen:]
+
+ var valLen int
+ switch wtyp {
+ case protowire.VarintType:
+ _, valLen = protowire.ConsumeVarint(b)
+ case protowire.Fixed32Type:
+ _, valLen = protowire.ConsumeFixed32(b)
+ case protowire.Fixed64Type:
+ _, valLen = protowire.ConsumeFixed64(b)
+ case protowire.BytesType:
+ _, valLen = protowire.ConsumeBytes(b)
+ case protowire.StartGroupType:
+ depth++
+ case protowire.EndGroupType:
+ depth--
+ default:
+ return nil, 0, errors.New("proto: cannot parse reserved wire type")
+ }
+ if valLen < 0 {
+ return nil, 0, protowire.ParseError(valLen)
+ }
+ b = b[valLen:]
+
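+ // depth returns to zero at the matching end group tag; exclude that tag
+ // from the returned bytes but include it in the consumed length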
+ if depth == 0 {
+ return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
+ }
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/defaults.go b/vendor/github.com/golang/protobuf/proto/defaults.go
new file mode 100644
index 0000000..d399bf0
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/defaults.go
@@ -0,0 +1,63 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// SetDefaults sets unpopulated scalar fields to their default values.
+// Fields within a oneof are not set even if they have a default value.
+// SetDefaults is recursively called upon any populated message fields.
+func SetDefaults(m Message) {
+ if m != nil {
+ setDefaults(MessageReflect(m))
+ }
+}
+
+func setDefaults(m protoreflect.Message) {
+ fds := m.Descriptor().Fields()
+ for i := 0; i < fds.Len(); i++ {
+ fd := fds.Get(i)
+ if !m.Has(fd) {
+ if fd.HasDefault() && fd.ContainingOneof() == nil {
+ v := fd.Default()
+ if fd.Kind() == protoreflect.BytesKind {
+ v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
+ }
+ m.Set(fd, v)
+ }
+ continue
+ }
+ }
+
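+ // recurse into any populated message fields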
+ m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ switch {
+ // Handle singular message.
+ case fd.Cardinality() != protoreflect.Repeated:
+ if fd.Message() != nil {
+ setDefaults(m.Get(fd).Message())
+ }
+ // Handle list of messages.
+ case fd.IsList():
+ if fd.Message() != nil {
+ ls := m.Get(fd).List()
+ for i := 0; i < ls.Len(); i++ {
+ setDefaults(ls.Get(i).Message())
+ }
+ }
+ // Handle map of messages.
+ case fd.IsMap():
+ if fd.MapValue().Message() != nil {
+ ms := m.Get(fd).Map()
+ ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
+ setDefaults(v.Message())
+ return true
+ })
+ }
+ }
+ return true
+ })
+}
diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go
new file mode 100644
index 0000000..e8db57e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/deprecated.go
@@ -0,0 +1,113 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strconv"
+
+ protoV2 "google.golang.org/protobuf/proto"
+)
+
+var (
+ // Deprecated: No longer returned.
+ ErrNil = errors.New("proto: Marshal called with nil")
+
+ // Deprecated: No longer returned.
+ ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+
+ // Deprecated: No longer returned.
+ ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+)
+
+// Deprecated: Do not use.
+type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
+
+// Deprecated: Do not use.
+func GetStats() Stats { return Stats{} }
+
+// Deprecated: Do not use.
+func MarshalMessageSet(interface{}) ([]byte, error) {
+ return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: Do not use.
+func UnmarshalMessageSet([]byte, interface{}) error {
+ return errors.New("proto: not implemented")
+}
+
+// Deprecated: Do not use.
+func MarshalMessageSetJSON(interface{}) ([]byte, error) {
+ return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: Do not use.
+func UnmarshalMessageSetJSON([]byte, interface{}) error {
+ return errors.New("proto: not implemented")
+}
+
+// Deprecated: Do not use.
+func RegisterMessageSetType(Message, int32, string) {}
+
+// Deprecated: Do not use.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// Deprecated: Do not use.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
+
+// Deprecated: Do not use; this type existed for internal-use only.
+type InternalMessageInfo struct{}
+
+// Deprecated: Do not use; this method existed for internal-use only.
+func (*InternalMessageInfo) DiscardUnknown(m Message) {
+ DiscardUnknown(m)
+}
+
+// Deprecated: Do not use; this method existed for internal-use only.
+func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) {
+ return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m))
+}
+
+// Deprecated: Do not use; this method existed for internal-use only.
+func (*InternalMessageInfo) Merge(dst, src Message) {
+ protoV2.Merge(MessageV2(dst), MessageV2(src))
+}
+
+// Deprecated: Do not use; this method existed for internal-use only.
+func (*InternalMessageInfo) Size(m Message) int {
+ return protoV2.Size(MessageV2(m))
+}
+
+// Deprecated: Do not use; this method existed for internal-use only.
+func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error {
+ return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go
new file mode 100644
index 0000000..2187e87
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/discard.go
@@ -0,0 +1,58 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// DiscardUnknown recursively discards all unknown fields from this message
+// and all embedded messages.
+//
+// When unmarshaling a message with unrecognized fields, the tags and values
+// of such fields are preserved in the Message. This allows a later call to
+// Marshal to produce a message that continues to have those
+// unrecognized fields. To avoid this, DiscardUnknown is used to
+// explicitly clear the unknown fields after unmarshaling.
+func DiscardUnknown(m Message) {
+ if m != nil {
+ discardUnknown(MessageReflect(m))
+ }
+}
+
+func discardUnknown(m protoreflect.Message) {
+ m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
+ switch {
+ // Handle singular message.
+ case fd.Cardinality() != protoreflect.Repeated:
+ if fd.Message() != nil {
+ discardUnknown(m.Get(fd).Message())
+ }
+ // Handle list of messages.
+ case fd.IsList():
+ if fd.Message() != nil {
+ ls := m.Get(fd).List()
+ for i := 0; i < ls.Len(); i++ {
+ discardUnknown(ls.Get(i).Message())
+ }
+ }
+ // Handle map of messages.
+ case fd.IsMap():
+ if fd.MapValue().Message() != nil {
+ ms := m.Get(fd).Map()
+ ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
+ discardUnknown(v.Message())
+ return true
+ })
+ }
+ }
+ return true
+ })
+
+ // Discard unknown fields.
+ if len(m.GetUnknown()) > 0 {
+ m.SetUnknown(nil)
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 0000000..42fc120
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,356 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/runtime/protoiface"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+type (
+ // ExtensionDesc represents an extension descriptor and
+ // is used to interact with an extension field in a message.
+ //
+ // Variables of this type are generated in code by protoc-gen-go.
+ ExtensionDesc = protoimpl.ExtensionInfo
+
+ // ExtensionRange represents a range of message extensions.
+ // Used in code generated by protoc-gen-go.
+ ExtensionRange = protoiface.ExtensionRangeV1
+
+ // Deprecated: Do not use; this is an internal type.
+ Extension = protoimpl.ExtensionFieldV1
+
+ // Deprecated: Do not use; this is an internal type.
+ XXX_InternalExtensions = protoimpl.ExtensionFields
+)
+
+// ErrMissingExtension is returned by GetExtension when the extension is not present.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+var errNotExtendable = errors.New("proto: not an extendable proto.Message")
+
+// HasExtension reports whether the extension field is present in m
+// either as an explicitly populated field or as an unknown field.
+func HasExtension(m Message, xt *ExtensionDesc) (has bool) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return false
+ }
+
+ // Check whether any populated known field matches the field number.
+ xtd := xt.TypeDescriptor()
+ if isValidExtension(mr.Descriptor(), xtd) {
+ has = mr.Has(xtd)
+ } else {
+ mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+ has = int32(fd.Number()) == xt.Field
+ return !has
+ })
+ }
+
+ // Check whether any unknown field matches the field number.
+ for b := mr.GetUnknown(); !has && len(b) > 0; {
+ num, _, n := protowire.ConsumeField(b)
+ has = int32(num) == xt.Field
+ b = b[n:]
+ }
+ return has
+}
+
+// ClearExtension removes the extension field from m
+// either as an explicitly populated field or as an unknown field.
+func ClearExtension(m Message, xt *ExtensionDesc) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return
+ }
+
+ xtd := xt.TypeDescriptor()
+ if isValidExtension(mr.Descriptor(), xtd) {
+ mr.Clear(xtd)
+ } else {
+ mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+ if int32(fd.Number()) == xt.Field {
+ mr.Clear(fd)
+ return false
+ }
+ return true
+ })
+ }
+ clearUnknown(mr, fieldNum(xt.Field))
+}
+
+// ClearAllExtensions clears all extensions from m.
+// This includes populated fields and unknown fields in the extension range.
+func ClearAllExtensions(m Message) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return
+ }
+
+ mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+ if fd.IsExtension() {
+ mr.Clear(fd)
+ }
+ return true
+ })
+ clearUnknown(mr, mr.Descriptor().ExtensionRanges())
+}
+
+// GetExtension retrieves a proto2 extended field from m.
+//
+// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
+// then GetExtension parses the encoded field and returns a Go value of the specified type.
+// If the field is not present, then the default value is returned (if one is specified),
+// otherwise ErrMissingExtension is returned.
+//
+// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil),
+// then GetExtension returns the raw encoded bytes for the extension field.
+func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
+ return nil, errNotExtendable
+ }
+
+ // Retrieve the unknown fields for this extension field.
+ var bo protoreflect.RawFields
+ for bi := mr.GetUnknown(); len(bi) > 0; {
+ num, _, n := protowire.ConsumeField(bi)
+ if int32(num) == xt.Field {
+ bo = append(bo, bi[:n]...)
+ }
+ bi = bi[n:]
+ }
+
+ // For type incomplete descriptors, only retrieve the unknown fields.
+ if xt.ExtensionType == nil {
+ return []byte(bo), nil
+ }
+
+ // If the extension field only exists as unknown fields, unmarshal it.
+ // This is rarely done since proto.Unmarshal eagerly unmarshals extensions.
+ xtd := xt.TypeDescriptor()
+ if !isValidExtension(mr.Descriptor(), xtd) {
+ return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
+ }
+ if !mr.Has(xtd) && len(bo) > 0 {
+ m2 := mr.New()
+ if err := (proto.UnmarshalOptions{
+ Resolver: extensionResolver{xt},
+ }.Unmarshal(bo, m2.Interface())); err != nil {
+ return nil, err
+ }
+ if m2.Has(xtd) {
+ mr.Set(xtd, m2.Get(xtd))
+ clearUnknown(mr, fieldNum(xt.Field))
+ }
+ }
+
+ // Check whether the message has the extension field set or a default.
+ var pv protoreflect.Value
+ switch {
+ case mr.Has(xtd):
+ pv = mr.Get(xtd)
+ case xtd.HasDefault():
+ pv = xtd.Default()
+ default:
+ return nil, ErrMissingExtension
+ }
+
+ v := xt.InterfaceOf(pv)
+ rv := reflect.ValueOf(v)
+ if isScalarKind(rv.Kind()) {
+ rv2 := reflect.New(rv.Type())
+ rv2.Elem().Set(rv)
+ v = rv2.Interface()
+ }
+ return v, nil
+}
+
+// extensionResolver is a custom extension resolver that stores a single
+// extension type that takes precedence over the global registry.
+type extensionResolver struct{ xt protoreflect.ExtensionType }
+
+func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
+ if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field {
+ return r.xt, nil
+ }
+ return protoregistry.GlobalTypes.FindExtensionByName(field)
+}
+
+func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
+ if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field {
+ return r.xt, nil
+ }
+ return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
+}
+
+// GetExtensions returns a list of the extensions values present in m,
+// corresponding with the provided list of extension descriptors, xts.
+// If an extension is missing in m, the corresponding value is nil.
+func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return nil, errNotExtendable
+ }
+
+ vs := make([]interface{}, len(xts))
+ for i, xt := range xts {
+ v, err := GetExtension(m, xt)
+ if err != nil {
+ if err == ErrMissingExtension {
+ continue
+ }
+ return vs, err
+ }
+ vs[i] = v
+ }
+ return vs, nil
+}
+
+// SetExtension sets an extension field in m to the provided value.
+func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
+ return errNotExtendable
+ }
+
+ rv := reflect.ValueOf(v)
+ if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) {
+ return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType)
+ }
+ if rv.Kind() == reflect.Ptr {
+ if rv.IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", v)
+ }
+ if isScalarKind(rv.Elem().Kind()) {
+ v = rv.Elem().Interface()
+ }
+ }
+
+ xtd := xt.TypeDescriptor()
+ if !isValidExtension(mr.Descriptor(), xtd) {
+ return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
+ }
+ mr.Set(xtd, xt.ValueOf(v))
+ clearUnknown(mr, fieldNum(xt.Field))
+ return nil
+}
+
+// SetRawExtension inserts b into the unknown fields of m.
+//
+// Deprecated: Use Message.ProtoReflect.SetUnknown instead.
+func SetRawExtension(m Message, fnum int32, b []byte) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return
+ }
+
+ // Verify that the raw field is valid.
+ for b0 := b; len(b0) > 0; {
+ num, _, n := protowire.ConsumeField(b0)
+ if int32(num) != fnum {
+ panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum))
+ }
+ b0 = b0[n:]
+ }
+
+ ClearExtension(m, &ExtensionDesc{Field: fnum})
+ mr.SetUnknown(append(mr.GetUnknown(), b...))
+}
+
+// ExtensionDescs returns a list of extension descriptors found in m,
+// containing descriptors for both populated extension fields in m and
+// unknown fields of m that are in the extension range.
+// For the latter case, a type-incomplete descriptor is provided where only
+// the ExtensionDesc.Field field is populated.
+// The order of the extension descriptors is undefined.
+func ExtensionDescs(m Message) ([]*ExtensionDesc, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
+ return nil, errNotExtendable
+ }
+
+ // Collect a set of known extension descriptors.
+ extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc)
+ mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ if fd.IsExtension() {
+ xt := fd.(protoreflect.ExtensionTypeDescriptor)
+ if xd, ok := xt.Type().(*ExtensionDesc); ok {
+ extDescs[fd.Number()] = xd
+ }
+ }
+ return true
+ })
+
+ // Collect a set of unknown extension descriptors.
+ extRanges := mr.Descriptor().ExtensionRanges()
+ for b := mr.GetUnknown(); len(b) > 0; {
+ num, _, n := protowire.ConsumeField(b)
+ if extRanges.Has(num) && extDescs[num] == nil {
+ extDescs[num] = nil
+ }
+ b = b[n:]
+ }
+
+ // Transpose the set of descriptors into a list.
+ var xts []*ExtensionDesc
+ for num, xt := range extDescs {
+ if xt == nil {
+ xt = &ExtensionDesc{Field: int32(num)}
+ }
+ xts = append(xts, xt)
+ }
+ return xts, nil
+}
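+
+// Sketch (illustrative only): incomplete descriptors for unknown fields can
+// be recognized by their empty Name.
+//
+//	xts, err := proto.ExtensionDescs(msg)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, xt := range xts {
+//		if xt.Name == "" {
+//			// Unknown extension: only xt.Field is populated.
+//		}
+//	}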
+
+// isValidExtension reports whether xtd is a valid extension descriptor for md.
+func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool {
+ return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number())
+}
+
+// isScalarKind reports whether k is a protobuf scalar kind (except bytes).
+// This function exists for historical reasons since the representation of
+// scalars differs between v1 and v2, where v1 uses *T and v2 uses T.
+func isScalarKind(k reflect.Kind) bool {
+ switch k {
+ case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+ return true
+ default:
+ return false
+ }
+}
+
+// clearUnknown removes unknown fields from m where remover.Has reports true.
+func clearUnknown(m protoreflect.Message, remover interface {
+ Has(protoreflect.FieldNumber) bool
+}) {
+ var bo protoreflect.RawFields
+ for bi := m.GetUnknown(); len(bi) > 0; {
+ num, _, n := protowire.ConsumeField(bi)
+ if !remover.Has(num) {
+ bo = append(bo, bi[:n]...)
+ }
+ bi = bi[n:]
+ }
+ if bi := m.GetUnknown(); len(bi) != len(bo) {
+ m.SetUnknown(bo)
+ }
+}
+
+type fieldNum protoreflect.FieldNumber
+
+func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool {
+ return protoreflect.FieldNumber(n1) == n2
+}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 0000000..dcdc220
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,306 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+// StructProperties represents protocol buffer type information for a
+// generated protobuf message in the open-struct API.
+//
+// Deprecated: Do not use.
+type StructProperties struct {
+ // Prop are the properties for each field.
+ //
+ // Fields belonging to a oneof are stored in OneofTypes instead, with a
+ // single Properties representing the parent oneof held here.
+ //
+ // The order of Prop matches the order of fields in the Go struct.
+ // Struct fields that are not related to protobufs have a "XXX_" prefix
+ // in the Properties.Name and must be ignored by the user.
+ Prop []*Properties
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the protobuf field name.
+ OneofTypes map[string]*OneofProperties
+}
+
+// Properties represents the type information for a protobuf message field.
+//
+// Deprecated: Do not use.
+type Properties struct {
+ // Name is a placeholder name with little meaningful semantic value.
+ // If the name has an "XXX_" prefix, the entire Properties must be ignored.
+ Name string
+ // OrigName is the protobuf field name or oneof name.
+ OrigName string
+ // JSONName is the JSON name for the protobuf field.
+ JSONName string
+ // Enum is a placeholder name for enums.
+ // For historical reasons, this is neither the Go name for the enum,
+ // nor the protobuf name for the enum.
+ Enum string // Deprecated: Do not use.
+ // Weak contains the full name of the weakly referenced message.
+ Weak string
+ // Wire is a string representation of the wire type.
+ Wire string
+ // WireType is the protobuf wire type for the field.
+ WireType int
+ // Tag is the protobuf field number.
+ Tag int
+ // Required reports whether this is a required field.
+ Required bool
+ // Optional reports whether this is an optional field.
+ Optional bool
+ // Repeated reports whether this is a repeated field.
+ Repeated bool
+ // Packed reports whether this is a packed repeated field of scalars.
+ Packed bool
+ // Proto3 reports whether this field operates under the proto3 syntax.
+ Proto3 bool
+ // Oneof reports whether this field belongs within a oneof.
+ Oneof bool
+
+ // Default is the default value in string form.
+ Default string
+ // HasDefault reports whether the field has a default value.
+ HasDefault bool
+
+ // MapKeyProp is the properties for the key field of a map field.
+ MapKeyProp *Properties
+ // MapValProp is the properties for the value field of a map field.
+ MapValProp *Properties
+}
+
+// OneofProperties represents the type information for a protobuf oneof.
+//
+// Deprecated: Do not use.
+type OneofProperties struct {
+ // Type is a pointer to the generated wrapper type for the field value.
+ // This is nil for messages that are not in the open-struct API.
+ Type reflect.Type
+ // Field is the index into StructProperties.Prop for the containing oneof.
+ Field int
+ // Prop is the properties for the field.
+ Prop *Properties
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+ s := p.Wire
+ s += "," + strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ s += ",name=" + p.OrigName
+ if p.JSONName != "" {
+ s += ",json=" + p.JSONName
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if len(p.Weak) > 0 {
+ s += ",weak=" + p.Weak
+ }
+ if p.Proto3 {
+ s += ",proto3"
+ }
+ if p.Oneof {
+ s += ",oneof"
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(tag string) {
+ // For example: "bytes,49,opt,name=foo,def=hello!"
+ for len(tag) > 0 {
+ i := strings.IndexByte(tag, ',')
+ if i < 0 {
+ i = len(tag)
+ }
+ switch s := tag[:i]; {
+ case strings.HasPrefix(s, "name="):
+ p.OrigName = s[len("name="):]
+ case strings.HasPrefix(s, "json="):
+ p.JSONName = s[len("json="):]
+ case strings.HasPrefix(s, "enum="):
+ p.Enum = s[len("enum="):]
+ case strings.HasPrefix(s, "weak="):
+ p.Weak = s[len("weak="):]
+ case strings.Trim(s, "0123456789") == "":
+ n, _ := strconv.ParseUint(s, 10, 32)
+ p.Tag = int(n)
+ case s == "opt":
+ p.Optional = true
+ case s == "req":
+ p.Required = true
+ case s == "rep":
+ p.Repeated = true
+ case s == "varint" || s == "zigzag32" || s == "zigzag64":
+ p.Wire = s
+ p.WireType = WireVarint
+ case s == "fixed32":
+ p.Wire = s
+ p.WireType = WireFixed32
+ case s == "fixed64":
+ p.Wire = s
+ p.WireType = WireFixed64
+ case s == "bytes":
+ p.Wire = s
+ p.WireType = WireBytes
+ case s == "group":
+ p.Wire = s
+ p.WireType = WireStartGroup
+ case s == "packed":
+ p.Packed = true
+ case s == "proto3":
+ p.Proto3 = true
+ case s == "oneof":
+ p.Oneof = true
+ case strings.HasPrefix(s, "def="):
+ // The default tag is special in that everything afterwards is the
+ // default regardless of the presence of commas.
+ p.HasDefault = true
+ p.Default, i = tag[len("def="):], len(tag)
+ }
+ tag = strings.TrimPrefix(tag[i:], ",")
+ }
+}
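+
+// A minimal sketch (not part of the original file) of Parse on the example
+// tag above:
+//
+//	var p Properties
+//	p.Parse("bytes,49,opt,name=foo,def=hello!")
+//	// p.Wire == "bytes", p.Tag == 49, p.Optional == true,
+//	// p.OrigName == "foo", p.HasDefault == true, p.Default == "hello!"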
+
+// Init populates the properties from a protocol buffer struct tag.
+//
+// Deprecated: Do not use.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.Name = name
+ p.OrigName = name
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+
+ if typ != nil && typ.Kind() == reflect.Map {
+ p.MapKeyProp = new(Properties)
+ p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil)
+ p.MapValProp = new(Properties)
+ p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil)
+ }
+}
+
+var propertiesCache sync.Map // map[reflect.Type]*StructProperties
+
+// GetProperties returns the list of properties for the type represented by t,
+// which must be a generated protocol buffer message in the open-struct API,
+// where protobuf message fields are represented by exported Go struct fields.
+//
+// Deprecated: Use protobuf reflection instead.
+func GetProperties(t reflect.Type) *StructProperties {
+ if p, ok := propertiesCache.Load(t); ok {
+ return p.(*StructProperties)
+ }
+ p, _ := propertiesCache.LoadOrStore(t, newProperties(t))
+ return p.(*StructProperties)
+}
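+
+// Illustrative call (assuming a generated open-struct message type pb.M):
+//
+//	sp := proto.GetProperties(reflect.TypeOf(pb.M{}))
+//	for _, p := range sp.Prop {
+//		if !strings.HasPrefix(p.Name, "XXX_") {
+//			fmt.Println(p.OrigName, p.Tag)
+//		}
+//	}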
+
+func newProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
+ }
+
+ var hasOneof bool
+ prop := new(StructProperties)
+
+ // Construct a list of properties for each field in the struct.
+ for i := 0; i < t.NumField(); i++ {
+ p := new(Properties)
+ f := t.Field(i)
+ tagField := f.Tag.Get("protobuf")
+ p.Init(f.Type, f.Name, tagField, &f)
+
+ tagOneof := f.Tag.Get("protobuf_oneof")
+ if tagOneof != "" {
+ hasOneof = true
+ p.OrigName = tagOneof
+ }
+
+ // Rename unrelated struct fields with the "XXX_" prefix since so much
+ // user code simply checks for this to exclude special fields.
+ if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") {
+ p.Name = "XXX_" + p.Name
+ p.OrigName = "XXX_" + p.OrigName
+ } else if p.Weak != "" {
+ p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field
+ }
+
+ prop.Prop = append(prop.Prop, p)
+ }
+
+ // Construct a mapping of oneof field names to properties.
+ if hasOneof {
+ var oneofWrappers []interface{}
+ if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
+ oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{})
+ }
+ if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
+ oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{})
+ }
+ if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok {
+ if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok {
+ oneofWrappers = m.ProtoMessageInfo().OneofWrappers
+ }
+ }
+
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, wrapper := range oneofWrappers {
+ p := &OneofProperties{
+ Type: reflect.ValueOf(wrapper).Type(), // *T
+ Prop: new(Properties),
+ }
+ f := p.Type.Elem().Field(0)
+ p.Prop.Name = f.Name
+ p.Prop.Parse(f.Tag.Get("protobuf"))
+
+ // Determine the struct field that contains this oneof.
+ // Each wrapper is assignable to exactly one parent field.
+ var foundOneof bool
+ for i := 0; i < t.NumField() && !foundOneof; i++ {
+ if p.Type.AssignableTo(t.Field(i).Type) {
+ p.Field = i
+ foundOneof = true
+ }
+ }
+ if !foundOneof {
+ panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
+ }
+ prop.OneofTypes[p.Prop.OrigName] = p
+ }
+ }
+
+ return prop
+}
+
+func (sp *StructProperties) Len() int { return len(sp.Prop) }
+func (sp *StructProperties) Less(i, j int) bool { return false }
+func (sp *StructProperties) Swap(i, j int) { return }
diff --git a/vendor/github.com/golang/protobuf/proto/proto.go b/vendor/github.com/golang/protobuf/proto/proto.go
new file mode 100644
index 0000000..5aee89c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/proto.go
@@ -0,0 +1,167 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package proto provides functionality for handling protocol buffer messages.
+// In particular, it provides marshaling and unmarshaling between a protobuf
+// message and the binary wire format.
+//
+// See https://developers.google.com/protocol-buffers/docs/gotutorial for
+// more information.
+//
+// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
+package proto
+
+import (
+ protoV2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ ProtoPackageIsVersion1 = true
+ ProtoPackageIsVersion2 = true
+ ProtoPackageIsVersion3 = true
+ ProtoPackageIsVersion4 = true
+)
+
+// GeneratedEnum is any enum type generated by protoc-gen-go
+// which is a named int32 kind.
+// This type exists for documentation purposes.
+type GeneratedEnum interface{}
+
+// GeneratedMessage is any message type generated by protoc-gen-go
+// which is a pointer to a named struct kind.
+// This type exists for documentation purposes.
+type GeneratedMessage interface{}
+
+// Message is a protocol buffer message.
+//
+// This is the v1 version of the message interface. It is marginally better
+// than an empty interface, but it still lacks any method to programmatically
+// interact with the contents of the message.
+//
+// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
+// exposes protobuf reflection as a first-class feature of the interface.
+//
+// To convert a v1 message to a v2 message, use the MessageV2 function.
+// To convert a v2 message to a v1 message, use the MessageV1 function.
+type Message = protoiface.MessageV1
+
+// MessageV1 converts either a v1 or v2 message to a v1 message.
+// It returns nil if m is nil.
+func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
+ return protoimpl.X.ProtoMessageV1Of(m)
+}
+
+// MessageV2 converts either a v1 or v2 message to a v2 message.
+// It returns nil if m is nil.
+func MessageV2(m GeneratedMessage) protoV2.Message {
+ return protoimpl.X.ProtoMessageV2Of(m)
+}
+
+// MessageReflect returns a reflective view for a message.
+// It returns nil if m is nil.
+func MessageReflect(m Message) protoreflect.Message {
+ return protoimpl.X.MessageOf(m)
+}
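+
+// A small sketch (hypothetical generated type *pb.M) of moving between the
+// two message interfaces:
+//
+//	m1 := &pb.M{}             // v1-style message
+//	m2 := proto.MessageV2(m1) // the same message through the v2 API
+//	b, err := protoV2.Marshal(m2)
+//	_, _ = b, err
+//	m1 = proto.MessageV1(m2).(*pb.M) // and back again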
+
+// Marshaler is implemented by messages that can marshal themselves.
+// This interface is used by the following functions: Size, Marshal,
+// Buffer.Marshal, and Buffer.EncodeMessage.
+//
+// Deprecated: Do not implement.
+type Marshaler interface {
+ // Marshal formats the encoded bytes of the message.
+ // It should be deterministic and emit valid protobuf wire data.
+ // The caller takes ownership of the returned buffer.
+ Marshal() ([]byte, error)
+}
+
+// Unmarshaler is implemented by messages that can unmarshal themselves.
+// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
+// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
+//
+// Deprecated: Do not implement.
+type Unmarshaler interface {
+ // Unmarshal parses the encoded bytes of the protobuf wire input.
+ // The provided buffer is only valid for the duration of the method call.
+ // It should not reset the receiver message.
+ Unmarshal([]byte) error
+}
+
+// Merger is implemented by messages that can merge themselves.
+// This interface is used by the following functions: Clone and Merge.
+//
+// Deprecated: Do not implement.
+type Merger interface {
+ // Merge merges the contents of src into the receiver message.
+ // It clones all data structures in src such that it aliases no mutable
+ // memory referenced by src.
+ Merge(src Message)
+}
+
+// RequiredNotSetError is an error type returned when
+// marshaling or unmarshaling a message with missing required fields.
+type RequiredNotSetError struct {
+ err error
+}
+
+func (e *RequiredNotSetError) Error() string {
+ if e.err != nil {
+ return e.err.Error()
+ }
+ return "proto: required field not set"
+}
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+ return true
+}
+
+func checkRequiredNotSet(m protoV2.Message) error {
+ if err := protoV2.CheckInitialized(m); err != nil {
+ return &RequiredNotSetError{err: err}
+ }
+ return nil
+}
+
+// Clone returns a deep copy of src.
+func Clone(src Message) Message {
+ return MessageV1(protoV2.Clone(MessageV2(src)))
+}
+
+// Merge merges src into dst, which must be messages of the same type.
+//
+// Populated scalar fields in src are copied to dst, while populated
+// singular messages in src are merged into dst by recursively calling Merge.
+// The elements of every list field in src are appended to the corresponding
+// list fields in dst. The entries of every map field in src are copied into
+// the corresponding map field in dst, possibly replacing existing entries.
+// The unknown fields of src are appended to the unknown fields of dst.
+func Merge(dst, src Message) {
+ protoV2.Merge(MessageV2(dst), MessageV2(src))
+}
+
+// Equal reports whether two messages are equal.
+// If two messages marshal to the same bytes under deterministic serialization,
+// then Equal is guaranteed to report true.
+//
+// Two messages are equal if they are the same protobuf message type,
+// have the same set of populated known and extension field values,
+// and the same set of unknown field values.
+//
+// Scalar values are compared with the equivalent of the == operator in Go,
+// except bytes values, which are compared using bytes.Equal, and
+// floating point values, which treat NaNs as equal.
+// Message values are compared by recursively calling Equal.
+// Lists are equal if each element value is also equal.
+// Maps are equal if they have the same set of keys, where the pair of values
+// for each key is also equal.
+func Equal(x, y Message) bool {
+ return protoV2.Equal(MessageV2(x), MessageV2(y))
+}
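+
+// Sketch of the Clone/Merge/Equal contract (hypothetical message src):
+//
+//	dst := proto.Clone(src)            // deep copy of src
+//	fmt.Println(proto.Equal(dst, src)) // true: same type and field values
+//	proto.Merge(dst, src)              // appends src's list elements onto dst's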
+
+func isMessageSet(md protoreflect.MessageDescriptor) bool {
+ ms, ok := md.(interface{ IsMessageSet() bool })
+ return ok && ms.IsMessageSet()
+}
diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go
new file mode 100644
index 0000000..066b432
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/registry.go
@@ -0,0 +1,317 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io/ioutil"
+ "reflect"
+ "strings"
+ "sync"
+
+ "google.golang.org/protobuf/reflect/protodesc"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+// filePath is the path to the proto source file.
+type filePath = string // e.g., "google/protobuf/descriptor.proto"
+
+// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto.
+type fileDescGZIP = []byte
+
+var fileCache sync.Map // map[filePath]fileDescGZIP
+
+// RegisterFile is called from generated code to register the compressed
+// FileDescriptorProto with the file path for a proto source file.
+//
+// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead.
+func RegisterFile(s filePath, d fileDescGZIP) {
+ // Decompress the descriptor.
+ zr, err := gzip.NewReader(bytes.NewReader(d))
+ if err != nil {
+ panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
+ }
+ b, err := ioutil.ReadAll(zr)
+ if err != nil {
+ panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
+ }
+
+ // Construct a protoreflect.FileDescriptor from the raw descriptor.
+ // Note that DescBuilder.Build automatically registers the constructed
+ // file descriptor with the v2 registry.
+ protoimpl.DescBuilder{RawDescriptor: b}.Build()
+
+ // Locally cache the raw descriptor form for the file.
+ fileCache.Store(s, d)
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto given the file path
+// for a proto source file. It returns nil if not found.
+//
+// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead.
+func FileDescriptor(s filePath) fileDescGZIP {
+ if v, ok := fileCache.Load(s); ok {
+ return v.(fileDescGZIP)
+ }
+
+ // Find the descriptor in the v2 registry.
+ var b []byte
+ if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
+ b, _ = Marshal(protodesc.ToFileDescriptorProto(fd))
+ }
+
+ // Locally cache the raw descriptor form for the file.
+ if len(b) > 0 {
+ v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b))
+ return v.(fileDescGZIP)
+ }
+ return nil
+}
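+
+// Illustrative lookup (the path is a placeholder):
+//
+//	if gz := proto.FileDescriptor("my/pkg/file.proto"); gz != nil {
+//		// gz holds the gzip-compressed FileDescriptorProto bytes.
+//	}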
+
+// enumName is the name of an enum. For historical reasons, the enum name is
+// neither the full Go name nor the full protobuf name of the enum.
+// The name is the dot-separated combination of the proto package that the
+// enum is declared within, followed by the Go type name of the generated enum.
+type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum"
+
+// enumsByName maps enum values by name to their numeric counterpart.
+type enumsByName = map[string]int32
+
+// enumsByNumber maps enum values by number to their name counterpart.
+type enumsByNumber = map[int32]string
+
+var enumCache sync.Map // map[enumName]enumsByName
+var numFilesCache sync.Map // map[protoreflect.FullName]int
+
+// RegisterEnum is called from the generated code to register the mapping of
+// enum value names to enum numbers for the enum identified by s.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead.
+func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) {
+ if _, ok := enumCache.Load(s); ok {
+ panic("proto: duplicate enum registered: " + s)
+ }
+ enumCache.Store(s, m)
+
+ // This does not forward registration to the v2 registry since this API
+ // lacks sufficient information to construct a complete v2 enum descriptor.
+}
+
+// EnumValueMap returns the mapping from enum value names to enum numbers for
+// the enum of the given name. It returns nil if not found.
+//
+// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead.
+func EnumValueMap(s enumName) enumsByName {
+ if v, ok := enumCache.Load(s); ok {
+ return v.(enumsByName)
+ }
+
+ // Check whether the cache is stale. If the number of files in the current
+ // package differs, then it means that some enums may have been recently
+ // registered upstream that we do not know about.
+ var protoPkg protoreflect.FullName
+ if i := strings.LastIndexByte(s, '.'); i >= 0 {
+ protoPkg = protoreflect.FullName(s[:i])
+ }
+ v, _ := numFilesCache.Load(protoPkg)
+ numFiles, _ := v.(int)
+ if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles {
+ return nil // cache is up-to-date; was not found earlier
+ }
+
+ // Update the enum cache for all enums declared in the given proto package.
+ numFiles = 0
+ protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool {
+ walkEnums(fd, func(ed protoreflect.EnumDescriptor) {
+ name := protoimpl.X.LegacyEnumName(ed)
+ if _, ok := enumCache.Load(name); !ok {
+ m := make(enumsByName)
+ evs := ed.Values()
+ for i := evs.Len() - 1; i >= 0; i-- {
+ ev := evs.Get(i)
+ m[string(ev.Name())] = int32(ev.Number())
+ }
+ enumCache.LoadOrStore(name, m)
+ }
+ })
+ numFiles++
+ return true
+ })
+ numFilesCache.Store(protoPkg, numFiles)
+
+ // Check cache again for enum map.
+ if v, ok := enumCache.Load(s); ok {
+ return v.(enumsByName)
+ }
+ return nil
+}
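+
+// Illustrative call (the enum name "my.pkg.MyEnum" is a placeholder):
+//
+//	if m := proto.EnumValueMap("my.pkg.MyEnum"); m != nil {
+//		n := m["MY_VALUE"] // numeric value of the named enum constant
+//		_ = n
+//	}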
+
+// walkEnums recursively walks all enums declared in d.
+func walkEnums(d interface {
+ Enums() protoreflect.EnumDescriptors
+ Messages() protoreflect.MessageDescriptors
+}, f func(protoreflect.EnumDescriptor)) {
+ eds := d.Enums()
+ for i := eds.Len() - 1; i >= 0; i-- {
+ f(eds.Get(i))
+ }
+ mds := d.Messages()
+ for i := mds.Len() - 1; i >= 0; i-- {
+ walkEnums(mds.Get(i), f)
+ }
+}
+
+// messageName is the full name of a protobuf message.
+type messageName = string
+
+var messageTypeCache sync.Map // map[messageName]reflect.Type
+
+// RegisterType is called from generated code to register the message Go type
+// for a message of the given name.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead.
+func RegisterType(m Message, s messageName) {
+ mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s))
+ if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil {
+ panic(err)
+ }
+ messageTypeCache.Store(s, reflect.TypeOf(m))
+}
+
+// RegisterMapType is called from generated code to register the Go map type
+// for a protobuf message representing a map entry.
+//
+// Deprecated: Do not use.
+func RegisterMapType(m interface{}, s messageName) {
+ t := reflect.TypeOf(m)
+ if t.Kind() != reflect.Map {
+ panic(fmt.Sprintf("invalid map kind: %v", t))
+ }
+ if _, ok := messageTypeCache.Load(s); ok {
+ panic(fmt.Errorf("proto: duplicate proto message registered: %s", s))
+ }
+ messageTypeCache.Store(s, t)
+}
+
+// MessageType returns the message type for a named message.
+// It returns nil if not found.
+//
+// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead.
+func MessageType(s messageName) reflect.Type {
+ if v, ok := messageTypeCache.Load(s); ok {
+ return v.(reflect.Type)
+ }
+
+ // Derive the message type from the v2 registry.
+ var t reflect.Type
+ if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil {
+ t = messageGoType(mt)
+ }
+
+ // If we could not get a concrete type, it is possible that it is a
+ // pseudo-message for a map entry.
+ if t == nil {
+ d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s))
+ if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() {
+ kt := goTypeForField(md.Fields().ByNumber(1))
+ vt := goTypeForField(md.Fields().ByNumber(2))
+ t = reflect.MapOf(kt, vt)
+ }
+ }
+
+ // Locally cache the message type for the given name.
+ if t != nil {
+ v, _ := messageTypeCache.LoadOrStore(s, t)
+ return v.(reflect.Type)
+ }
+ return nil
+}
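+
+// Sketch (placeholder name): allocating a fresh message by name, assuming the
+// registered Go type is a pointer to a struct.
+//
+//	if t := proto.MessageType("my.pkg.MyMessage"); t != nil {
+//		m := reflect.New(t.Elem()).Interface().(proto.Message)
+//		_ = m
+//	}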
+
+func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type {
+ switch k := fd.Kind(); k {
+ case protoreflect.EnumKind:
+ if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil {
+ return enumGoType(et)
+ }
+ return reflect.TypeOf(protoreflect.EnumNumber(0))
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil {
+ return messageGoType(mt)
+ }
+ return reflect.TypeOf((*protoreflect.Message)(nil)).Elem()
+ default:
+ return reflect.TypeOf(fd.Default().Interface())
+ }
+}
+
+func enumGoType(et protoreflect.EnumType) reflect.Type {
+ return reflect.TypeOf(et.New(0))
+}
+
+func messageGoType(mt protoreflect.MessageType) reflect.Type {
+ return reflect.TypeOf(MessageV1(mt.Zero().Interface()))
+}
+
+// MessageName returns the full protobuf name for the given message type.
+//
+// Deprecated: Use protoreflect.MessageDescriptor.FullName instead.
+func MessageName(m Message) messageName {
+ if m == nil {
+ return ""
+ }
+ if m, ok := m.(interface{ XXX_MessageName() messageName }); ok {
+ return m.XXX_MessageName()
+ }
+ return messageName(protoimpl.X.MessageDescriptorOf(m).FullName())
+}
+
+// RegisterExtension is called from the generated code to register
+// the extension descriptor.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead.
+func RegisterExtension(d *ExtensionDesc) {
+ if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil {
+ panic(err)
+ }
+}
+
+type extensionsByNumber = map[int32]*ExtensionDesc
+
+var extensionCache sync.Map // map[messageName]extensionsByNumber
+
+// RegisteredExtensions returns a map of the registered extensions for the
+// provided protobuf message, indexed by the extension field number.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead.
+func RegisteredExtensions(m Message) extensionsByNumber {
+ // Check whether the cache is stale. If the number of extensions for
+ // the given message differs, then it means that some extensions were
+ // recently registered upstream that we do not know about.
+ s := MessageName(m)
+ v, _ := extensionCache.Load(s)
+ xs, _ := v.(extensionsByNumber)
+ if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) {
+ return xs // cache is up-to-date
+ }
+
+ // Cache is stale, re-compute the extensions map.
+ xs = make(extensionsByNumber)
+ protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool {
+ if xd, ok := xt.(*ExtensionDesc); ok {
+ xs[int32(xt.TypeDescriptor().Number())] = xd
+ } else {
+ // TODO: This implies that the protoreflect.ExtensionType is a
+ // custom type not generated by protoc-gen-go. We could try and
+ // convert the type to an ExtensionDesc.
+ }
+ return true
+ })
+ extensionCache.Store(s, xs)
+ return xs
+}
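+
+// Illustrative iteration (hypothetical message msg):
+//
+//	for num, xd := range proto.RegisteredExtensions(msg) {
+//		fmt.Println(num, xd.Name)
+//	}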
diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go
new file mode 100644
index 0000000..47eb3e4
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_decode.go
@@ -0,0 +1,801 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/encoding/prototext"
+ protoV2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const wrapTextUnmarshalV2 = false
+
+// ParseError is returned by UnmarshalText.
+type ParseError struct {
+ Message string
+
+ // Deprecated: Do not use.
+ Line, Offset int
+}
+
+func (e *ParseError) Error() string {
+ if wrapTextUnmarshalV2 {
+ return e.Message
+ }
+ if e.Line == 1 {
+ return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message)
+ }
+ return fmt.Sprintf("line %d: %v", e.Line, e.Message)
+}
+
+// UnmarshalText parses a proto text formatted string into m.
+func UnmarshalText(s string, m Message) error {
+ if u, ok := m.(encoding.TextUnmarshaler); ok {
+ return u.UnmarshalText([]byte(s))
+ }
+
+ m.Reset()
+ mi := MessageV2(m)
+
+ if wrapTextUnmarshalV2 {
+ err := prototext.UnmarshalOptions{
+ AllowPartial: true,
+ }.Unmarshal([]byte(s), mi)
+ if err != nil {
+ return &ParseError{Message: err.Error()}
+ }
+ return checkRequiredNotSet(mi)
+ } else {
+ if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil {
+ return err
+ }
+ return checkRequiredNotSet(mi)
+ }
+}
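+
+// A hedged example (hypothetical message type with fields name and id):
+//
+//	var m pb.M
+//	if err := proto.UnmarshalText(`name: "alice" id: 7`, &m); err != nil {
+//		log.Fatal(err)
+//	}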
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) {
+ md := m.Descriptor()
+ fds := md.Fields()
+
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]" or "[type/url]".
+ //
+ // The whole struct can also be an expanded Any message, like:
+ // [type/url] < ... struct contents ... >
+ seen := make(map[protoreflect.FieldNumber]bool)
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ if err := p.unmarshalExtensionOrAny(m, seen); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := protoreflect.Name(tok.value)
+ fd := fds.ByName(name)
+ switch {
+ case fd == nil:
+ gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name))))
+ if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name {
+ fd = gd
+ }
+ case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name:
+ fd = nil
+ case fd.IsWeak() && fd.Message().IsPlaceholder():
+ fd = nil
+ }
+ if fd == nil {
+ typeName := string(md.FullName())
+ if m, ok := m.Interface().(Message); ok {
+ t := reflect.TypeOf(m)
+ if t.Kind() == reflect.Ptr {
+ typeName = t.Elem().String()
+ }
+ }
+ return p.errorf("unknown field name %q in %v", name, typeName)
+ }
+ if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil {
+ return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name())
+ }
+ if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] {
+ return p.errorf("non-repeated field %q was repeated", fd.Name())
+ }
+ seen[fd.Number()] = true
+
+ // Consume any colon.
+ if err := p.checkForColon(fd); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ v := m.Get(fd)
+ if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
+ v = m.Mutable(fd)
+ }
+ if v, err = p.unmarshalValue(v, fd); err != nil {
+ return err
+ }
+ m.Set(fd, v)
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error {
+ name, err := p.consumeExtensionOrAnyName()
+ if err != nil {
+ return err
+ }
+
+ // If it contains a slash, it's an Any type URL.
+ if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ // consume an optional colon
+ if tok.value == ":" {
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ }
+
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+
+ mt, err := protoregistry.GlobalTypes.FindMessageByURL(name)
+ if err != nil {
+ return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):])
+ }
+ m2 := mt.New()
+ if err := p.unmarshalMessage(m2, terminator); err != nil {
+ return err
+ }
+ b, err := protoV2.Marshal(m2.Interface())
+ if err != nil {
+ return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err)
+ }
+
+ urlFD := m.Descriptor().Fields().ByName("type_url")
+ valFD := m.Descriptor().Fields().ByName("value")
+ if seen[urlFD.Number()] {
+ return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name())
+ }
+ if seen[valFD.Number()] {
+ return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name())
+ }
+ m.Set(urlFD, protoreflect.ValueOfString(name))
+ m.Set(valFD, protoreflect.ValueOfBytes(b))
+ seen[urlFD.Number()] = true
+ seen[valFD.Number()] = true
+ return nil
+ }
+
+ xname := protoreflect.FullName(name)
+ xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
+ if xt == nil && isMessageSet(m.Descriptor()) {
+ xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
+ }
+ if xt == nil {
+ return p.errorf("unrecognized extension %q", name)
+ }
+ fd := xt.TypeDescriptor()
+ if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
+ return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName())
+ }
+
+ if err := p.checkForColon(fd); err != nil {
+ return err
+ }
+
+ v := m.Get(fd)
+ if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
+ v = m.Mutable(fd)
+ }
+ v, err = p.unmarshalValue(v, fd)
+ if err != nil {
+ return err
+ }
+ m.Set(fd, v)
+ return p.consumeOptionalSeparator()
+}
+
+func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return v, tok.err
+ }
+ if tok.value == "" {
+ return v, p.errorf("unexpected EOF")
+ }
+
+ switch {
+ case fd.IsList():
+ lv := v.List()
+ var err error
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ vv := lv.NewElement()
+ vv, err = p.unmarshalSingularValue(vv, fd)
+ if err != nil {
+ return v, err
+ }
+ lv.Append(vv)
+
+ tok := p.next()
+ if tok.err != nil {
+ return v, tok.err
+ }
+ if tok.value == "]" {
+ break
+ }
+ if tok.value != "," {
+ return v, p.errorf("expected ']' or ',', found %q", tok.value)
+ }
+ }
+ return v, nil
+ }
+
+ // One value of the repeated field.
+ p.back()
+ vv := lv.NewElement()
+ vv, err = p.unmarshalSingularValue(vv, fd)
+ if err != nil {
+ return v, err
+ }
+ lv.Append(vv)
+ return v, nil
+ case fd.IsMap():
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // However, implementations may omit key or value, and technically
+ // we should support them in any order.
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return v, p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+
+ keyFD := fd.MapKey()
+ valFD := fd.MapValue()
+
+ mv := v.Map()
+ kv := keyFD.Default()
+ vv := mv.NewValue()
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return v, tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ var err error
+ switch tok.value {
+ case "key":
+ if err := p.consumeToken(":"); err != nil {
+ return v, err
+ }
+ if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
+ return v, err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return v, err
+ }
+ case "value":
+ if err := p.checkForColon(valFD); err != nil {
+ return v, err
+ }
+ if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
+ return v, err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return v, err
+ }
+ default:
+ p.back()
+ return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+ }
+ }
+ mv.Set(kv.MapKey(), vv)
+ return v, nil
+ default:
+ p.back()
+ return p.unmarshalSingularValue(v, fd)
+ }
+}
+
+func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return v, tok.err
+ }
+ if tok.value == "" {
+ return v, p.errorf("unexpected EOF")
+ }
+
+ switch fd.Kind() {
+ case protoreflect.BoolKind:
+ switch tok.value {
+ case "true", "1", "t", "True":
+ return protoreflect.ValueOfBool(true), nil
+ case "false", "0", "f", "False":
+ return protoreflect.ValueOfBool(false), nil
+ }
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ return protoreflect.ValueOfInt32(int32(x)), nil
+ }
+
+ // The C++ parser accepts large positive hex numbers and uses
+ // two's complement arithmetic to represent them as negative numbers.
+ // This feature is here for backwards compatibility with C++.
+ if strings.HasPrefix(tok.value, "0x") {
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil
+ }
+ }
+ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ return protoreflect.ValueOfInt64(int64(x)), nil
+ }
+
+ // The C++ parser accepts large positive hex numbers and uses
+ // two's complement arithmetic to represent them as negative numbers.
+ // This feature is here for backwards compatibility with C++.
+ if strings.HasPrefix(tok.value, "0x") {
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil
+ }
+ }
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ return protoreflect.ValueOfUint32(uint32(x)), nil
+ }
+ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ return protoreflect.ValueOfUint64(uint64(x)), nil
+ }
+ case protoreflect.FloatKind:
+ // Ignore 'f' for compatibility with output generated by C++,
+ // but don't remove 'f' when the value is "-inf" or "inf".
+ v := tok.value
+ if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
+ v = v[:len(v)-len("f")]
+ }
+ if x, err := strconv.ParseFloat(v, 32); err == nil {
+ return protoreflect.ValueOfFloat32(float32(x)), nil
+ }
+ case protoreflect.DoubleKind:
+ // Ignore 'f' for compatibility with output generated by C++,
+ // but don't remove 'f' when the value is "-inf" or "inf".
+ v := tok.value
+ if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
+ v = v[:len(v)-len("f")]
+ }
+ if x, err := strconv.ParseFloat(v, 64); err == nil {
+ return protoreflect.ValueOfFloat64(float64(x)), nil
+ }
+ case protoreflect.StringKind:
+ if isQuote(tok.value[0]) {
+ return protoreflect.ValueOfString(tok.unquoted), nil
+ }
+ case protoreflect.BytesKind:
+ if isQuote(tok.value[0]) {
+ return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil
+ }
+ case protoreflect.EnumKind:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil
+ }
+ vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value))
+ if vd != nil {
+ return protoreflect.ValueOfEnum(vd.Number()), nil
+ }
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return v, p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ err := p.unmarshalMessage(v.Message(), terminator)
+ return v, err
+ default:
+ panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
+ }
+ return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value)
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ if fd.Message() == nil {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+// consumeExtensionOrAnyName consumes an extension name or an Any type URL and
+// the following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtensionOrAnyName() (string, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return "", tok.err
+ }
+
+ // If the extension name or type URL is quoted, it's a single token.
+ if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+ name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+ if err != nil {
+ return "", err
+ }
+ return name, p.consumeToken("]")
+ }
+
+ // Consume everything up to "]"
+ var parts []string
+ for tok.value != "]" {
+ parts = append(parts, tok.value)
+ tok = p.next()
+ if tok.err != nil {
+ return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+ }
+ if p.done && tok.value != "]" {
+ return "", p.errorf("unclosed type_url or extension name")
+ }
+ }
+ return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in unmarshalMessage to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || !isQuote(p.s[0]) {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+var errBadUTF8 = errors.New("proto: bad UTF-8")
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ ss := string(r) + s[:2]
+ s = s[2:]
+ i, err := strconv.ParseUint(ss, 8, 8)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'x', 'X', 'u', 'U':
+ var n int
+ switch r {
+ case 'x', 'X':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
+ }
+ ss := s[:n]
+ s = s[n:]
+ i, err := strconv.ParseUint(ss, 16, 64)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
+ }
+ if r == 'x' || r == 'X' {
+ return string([]byte{byte(i)}), s, nil
+ }
+ if i > utf8.MaxRune {
+ return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
+ }
+ return string(rune(i)), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func isQuote(c byte) bool {
+ switch c {
+ case '"', '\'':
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go
new file mode 100644
index 0000000..a31134e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_encode.go
@@ -0,0 +1,560 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "bytes"
+ "encoding"
+ "fmt"
+ "io"
+ "math"
+ "sort"
+ "strings"
+
+ "google.golang.org/protobuf/encoding/prototext"
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const wrapTextMarshalV2 = false
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+ Compact bool // use compact text format (one line)
+ ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes the proto text format of m to w.
+func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error {
+ b, err := tm.marshal(m)
+ if len(b) > 0 {
+ if _, err := w.Write(b); err != nil {
+ return err
+ }
+ }
+ return err
+}
+
+// Text returns a proto text formatted string of m.
+func (tm *TextMarshaler) Text(m Message) string {
+ b, _ := tm.marshal(m)
+ return string(b)
+}
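+
+// Hedged usage (hypothetical message m):
+//
+//	tm := &proto.TextMarshaler{Compact: true, ExpandAny: true}
+//	s := tm.Text(m) // one-line text form, expanding known Any messages
+//	_ = s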
+
+func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return []byte(""), nil
+ }
+
+ if wrapTextMarshalV2 {
+ if m, ok := m.(encoding.TextMarshaler); ok {
+ return m.MarshalText()
+ }
+
+ opts := prototext.MarshalOptions{
+ AllowPartial: true,
+ EmitUnknown: true,
+ }
+ if !tm.Compact {
+ opts.Indent = " "
+ }
+ if !tm.ExpandAny {
+ opts.Resolver = (*protoregistry.Types)(nil)
+ }
+ return opts.Marshal(mr.Interface())
+ } else {
+ w := &textWriter{
+ compact: tm.Compact,
+ expandAny: tm.ExpandAny,
+ complete: true,
+ }
+
+ if m, ok := m.(encoding.TextMarshaler); ok {
+ b, err := m.MarshalText()
+ if err != nil {
+ return nil, err
+ }
+ w.Write(b)
+ return w.buf, nil
+ }
+
+ err := w.writeMessage(mr)
+ return w.buf, err
+ }
+}
+
+var (
+ defaultTextMarshaler = TextMarshaler{}
+ compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// MarshalText writes the proto text format of m to w.
+func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) }
+
+// MarshalTextString returns a proto text formatted string of m.
+func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) }
+
+// CompactText writes the compact proto text format of m to w.
+func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) }
+
+// CompactTextString returns a compact proto text formatted string of m.
+func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) }
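+
+// Rough illustration (hypothetical message with name and id fields set):
+//
+//	proto.MarshalTextString(m)  // "name: \"alice\"\nid: 7\n"
+//	proto.CompactTextString(m)  // "name:\"alice\" id:7 " (single line)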
+
+var (
+ newline = []byte("\n")
+ endBraceNewline = []byte("}\n")
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ compact bool // same as TextMarshaler.Compact
+ expandAny bool // same as TextMarshaler.ExpandAny
+ complete bool // whether the current position is a complete line
+ indent int // indentation level; never negative
+ buf []byte
+}
+
+func (w *textWriter) Write(p []byte) (n int, _ error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.buf = append(w.buf, p...)
+ w.complete = false
+ return len(p), nil
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ w.buf = append(w.buf, ' ')
+ n++
+ }
+ w.buf = append(w.buf, frag...)
+ n += len(frag)
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ w.buf = append(w.buf, frag...)
+ n += len(frag)
+ if i+1 < len(frags) {
+ w.buf = append(w.buf, '\n')
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.buf = append(w.buf, c)
+ w.complete = c == '\n'
+ return nil
+}
+
+func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+
+ if fd.Kind() != protoreflect.GroupKind {
+ w.buf = append(w.buf, fd.Name()...)
+ w.WriteByte(':')
+ } else {
+ // Use message type name for group field name.
+ w.buf = append(w.buf, fd.Message().Name()...)
+ }
+
+ if !w.compact {
+ w.WriteByte(' ')
+ }
+}
+
+func requiresQuotes(u string) bool {
+ // When the type URL contains any character outside [0-9A-Za-z._/], it must be quoted.
+ for _, ch := range u {
+ switch {
+ case ch == '.' || ch == '/' || ch == '_':
+ continue
+ case '0' <= ch && ch <= '9':
+ continue
+ case 'A' <= ch && ch <= 'Z':
+ continue
+ case 'a' <= ch && ch <= 'z':
+ continue
+ default:
+ return true
+ }
+ }
+ return false
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if the Any value can't be unmarshaled (e.g. because
+// the required message type is not linked in).
+//
+// It returns (true, error) when the value was written in expanded format or an
+// error was encountered.
+func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) {
+ md := m.Descriptor()
+ fdURL := md.Fields().ByName("type_url")
+ fdVal := md.Fields().ByName("value")
+
+ url := m.Get(fdURL).String()
+ mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
+ if err != nil {
+ return false, nil
+ }
+
+ b := m.Get(fdVal).Bytes()
+ m2 := mt.New()
+ if err := proto.Unmarshal(b, m2.Interface()); err != nil {
+ return false, nil
+ }
+ w.Write([]byte("["))
+ if requiresQuotes(url) {
+ w.writeQuotedString(url)
+ } else {
+ w.Write([]byte(url))
+ }
+ if w.compact {
+ w.Write([]byte("]:<"))
+ } else {
+ w.Write([]byte("]: <\n"))
+ w.indent++
+ }
+ if err := w.writeMessage(m2); err != nil {
+ return true, err
+ }
+ if w.compact {
+ w.Write([]byte("> "))
+ } else {
+ w.indent--
+ w.Write([]byte(">\n"))
+ }
+ return true, nil
+}
+
+func (w *textWriter) writeMessage(m protoreflect.Message) error {
+ md := m.Descriptor()
+ if w.expandAny && md.FullName() == "google.protobuf.Any" {
+ if canExpand, err := w.writeProto3Any(m); canExpand {
+ return err
+ }
+ }
+
+ fds := md.Fields()
+ for i := 0; i < fds.Len(); {
+ fd := fds.Get(i)
+ if od := fd.ContainingOneof(); od != nil {
+ fd = m.WhichOneof(od)
+ i += od.Fields().Len()
+ } else {
+ i++
+ }
+ if fd == nil || !m.Has(fd) {
+ continue
+ }
+
+ switch {
+ case fd.IsList():
+ lv := m.Get(fd).List()
+ for j := 0; j < lv.Len(); j++ {
+ w.writeName(fd)
+ v := lv.Get(j)
+ if err := w.writeSingularValue(v, fd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ }
+ case fd.IsMap():
+ kfd := fd.MapKey()
+ vfd := fd.MapValue()
+ mv := m.Get(fd).Map()
+
+ type entry struct{ key, val protoreflect.Value }
+ var entries []entry
+ mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
+ entries = append(entries, entry{k.Value(), v})
+ return true
+ })
+ sort.Slice(entries, func(i, j int) bool {
+ switch kfd.Kind() {
+ case protoreflect.BoolKind:
+ return !entries[i].key.Bool() && entries[j].key.Bool()
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ return entries[i].key.Int() < entries[j].key.Int()
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ return entries[i].key.Uint() < entries[j].key.Uint()
+ case protoreflect.StringKind:
+ return entries[i].key.String() < entries[j].key.String()
+ default:
+ panic("invalid kind")
+ }
+ })
+ for _, entry := range entries {
+ w.writeName(fd)
+ w.WriteByte('<')
+ if !w.compact {
+ w.WriteByte('\n')
+ }
+ w.indent++
+ w.writeName(kfd)
+ if err := w.writeSingularValue(entry.key, kfd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ w.writeName(vfd)
+ if err := w.writeSingularValue(entry.val, vfd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ w.indent--
+ w.WriteByte('>')
+ w.WriteByte('\n')
+ }
+ default:
+ w.writeName(fd)
+ if err := w.writeSingularValue(m.Get(fd), fd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ }
+ }
+
+ if b := m.GetUnknown(); len(b) > 0 {
+ w.writeUnknownFields(b)
+ }
+ return w.writeExtensions(m)
+}
+
+func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
+ switch fd.Kind() {
+ case protoreflect.FloatKind, protoreflect.DoubleKind:
+ switch vf := v.Float(); {
+ case math.IsInf(vf, +1):
+ w.Write(posInf)
+ case math.IsInf(vf, -1):
+ w.Write(negInf)
+ case math.IsNaN(vf):
+ w.Write(nan)
+ default:
+ fmt.Fprint(w, v.Interface())
+ }
+ case protoreflect.StringKind:
+ // NOTE: This does not validate UTF-8 for historical reasons.
+ w.writeQuotedString(v.String())
+ case protoreflect.BytesKind:
+ w.writeQuotedString(string(v.Bytes()))
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ var bra, ket byte = '<', '>'
+ if fd.Kind() == protoreflect.GroupKind {
+ bra, ket = '{', '}'
+ }
+ w.WriteByte(bra)
+ if !w.compact {
+ w.WriteByte('\n')
+ }
+ w.indent++
+ m := v.Message()
+ if m2, ok := m.Interface().(encoding.TextMarshaler); ok {
+ b, err := m2.MarshalText()
+ if err != nil {
+ return err
+ }
+ w.Write(b)
+ } else {
+ w.writeMessage(m)
+ }
+ w.indent--
+ w.WriteByte(ket)
+ case protoreflect.EnumKind:
+ if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil {
+ fmt.Fprint(w, ev.Name())
+ } else {
+ fmt.Fprint(w, v.Enum())
+ }
+ default:
+ fmt.Fprint(w, v.Interface())
+ }
+ return nil
+}
+
+// writeQuotedString writes a quoted string in the protocol buffer text format.
+func (w *textWriter) writeQuotedString(s string) {
+ w.WriteByte('"')
+ for i := 0; i < len(s); i++ {
+ switch c := s[i]; c {
+ case '\n':
+ w.buf = append(w.buf, `\n`...)
+ case '\r':
+ w.buf = append(w.buf, `\r`...)
+ case '\t':
+ w.buf = append(w.buf, `\t`...)
+ case '"':
+ w.buf = append(w.buf, `\"`...)
+ case '\\':
+ w.buf = append(w.buf, `\\`...)
+ default:
+ if isPrint := c >= 0x20 && c < 0x7f; isPrint {
+ w.buf = append(w.buf, c)
+ } else {
+ w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...)
+ }
+ }
+ }
+ w.WriteByte('"')
+}
+
+func (w *textWriter) writeUnknownFields(b []byte) {
+ if !w.compact {
+ fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b))
+ }
+
+ for len(b) > 0 {
+ num, wtyp, n := protowire.ConsumeTag(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+
+ if wtyp == protowire.EndGroupType {
+ w.indent--
+ w.Write(endBraceNewline)
+ continue
+ }
+ fmt.Fprint(w, num)
+ if wtyp != protowire.StartGroupType {
+ w.WriteByte(':')
+ }
+ if !w.compact || wtyp == protowire.StartGroupType {
+ w.WriteByte(' ')
+ }
+ switch wtyp {
+ case protowire.VarintType:
+ v, n := protowire.ConsumeVarint(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+ fmt.Fprint(w, v)
+ case protowire.Fixed32Type:
+ v, n := protowire.ConsumeFixed32(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+ fmt.Fprint(w, v)
+ case protowire.Fixed64Type:
+ v, n := protowire.ConsumeFixed64(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+ fmt.Fprint(w, v)
+ case protowire.BytesType:
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return
+ }
+ b = b[n:]
+ fmt.Fprintf(w, "%q", v)
+ case protowire.StartGroupType:
+ w.WriteByte('{')
+ w.indent++
+ default:
+ fmt.Fprintf(w, "/* unknown wire type %d */", wtyp)
+ }
+ w.WriteByte('\n')
+ }
+}
+
+// writeExtensions writes all the extensions in m.
+func (w *textWriter) writeExtensions(m protoreflect.Message) error {
+ md := m.Descriptor()
+ if md.ExtensionRanges().Len() == 0 {
+ return nil
+ }
+
+ type ext struct {
+ desc protoreflect.FieldDescriptor
+ val protoreflect.Value
+ }
+ var exts []ext
+ m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ if fd.IsExtension() {
+ exts = append(exts, ext{fd, v})
+ }
+ return true
+ })
+ sort.Slice(exts, func(i, j int) bool {
+ return exts[i].desc.Number() < exts[j].desc.Number()
+ })
+
+ for _, ext := range exts {
+ // For message set, use the name of the message as the extension name.
+ name := string(ext.desc.FullName())
+ if isMessageSet(ext.desc.ContainingMessage()) {
+ name = strings.TrimSuffix(name, ".message_set_extension")
+ }
+
+ if !ext.desc.IsList() {
+ if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil {
+ return err
+ }
+ } else {
+ lv := ext.val.List()
+ for i := 0; i < lv.Len(); i++ {
+ if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
+ fmt.Fprintf(w, "[%s]:", name)
+ if !w.compact {
+ w.WriteByte(' ')
+ }
+ if err := w.writeSingularValue(v, fd); err != nil {
+ return err
+ }
+ w.WriteByte('\n')
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ for i := 0; i < w.indent*2; i++ {
+ w.buf = append(w.buf, ' ')
+ }
+ w.complete = false
+}
diff --git a/vendor/github.com/golang/protobuf/proto/wire.go b/vendor/github.com/golang/protobuf/proto/wire.go
new file mode 100644
index 0000000..d7c28da
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/wire.go
@@ -0,0 +1,78 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ protoV2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// Size returns the size in bytes of the wire-format encoding of m.
+func Size(m Message) int {
+ if m == nil {
+ return 0
+ }
+ mi := MessageV2(m)
+ return protoV2.Size(mi)
+}
+
+// Marshal returns the wire-format encoding of m.
+func Marshal(m Message) ([]byte, error) {
+ b, err := marshalAppend(nil, m, false)
+ if b == nil {
+ b = zeroBytes
+ }
+ return b, err
+}
+
+var zeroBytes = make([]byte, 0, 0)
+
+func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) {
+ if m == nil {
+ return nil, ErrNil
+ }
+ mi := MessageV2(m)
+ nbuf, err := protoV2.MarshalOptions{
+ Deterministic: deterministic,
+ AllowPartial: true,
+ }.MarshalAppend(buf, mi)
+ if err != nil {
+ return buf, err
+ }
+ if len(buf) == len(nbuf) {
+ if !mi.ProtoReflect().IsValid() {
+ return buf, ErrNil
+ }
+ }
+ return nbuf, checkRequiredNotSet(mi)
+}
+
+// Unmarshal parses a wire-format message in b and places the decoded results in m.
+//
+// Unmarshal resets m before starting to unmarshal, so any existing data in m is always
+// removed. Use UnmarshalMerge to preserve and append to existing data.
+func Unmarshal(b []byte, m Message) error {
+ m.Reset()
+ return UnmarshalMerge(b, m)
+}
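+
+// Illustrative round trip (a sketch; pb.Foo stands for any generated message
+// type, not one defined in this package):
+//
+// in := &pb.Foo{Name: proto.String("x")}
+// b, err := proto.Marshal(in)
+// if err != nil { /* handle error */ }
+// out := new(pb.Foo)
+// if err := proto.Unmarshal(b, out); err != nil { /* handle error */ }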
+
+// UnmarshalMerge parses a wire-format message in b and places the decoded results in m.
+func UnmarshalMerge(b []byte, m Message) error {
+ mi := MessageV2(m)
+ out, err := protoV2.UnmarshalOptions{
+ AllowPartial: true,
+ Merge: true,
+ }.UnmarshalState(protoiface.UnmarshalInput{
+ Buf: b,
+ Message: mi.ProtoReflect(),
+ })
+ if err != nil {
+ return err
+ }
+ if out.Flags&protoiface.UnmarshalInitialized > 0 {
+ return nil
+ }
+ return checkRequiredNotSet(mi)
+}
diff --git a/vendor/github.com/golang/protobuf/proto/wrappers.go b/vendor/github.com/golang/protobuf/proto/wrappers.go
new file mode 100644
index 0000000..398e348
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/wrappers.go
@@ -0,0 +1,34 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+// Bool stores v in a new bool value and returns a pointer to it.
+func Bool(v bool) *bool { return &v }
+
+// Int stores v in a new int32 value and returns a pointer to it.
+//
+// Deprecated: Use Int32 instead.
+func Int(v int) *int32 { return Int32(int32(v)) }
+
+// Int32 stores v in a new int32 value and returns a pointer to it.
+func Int32(v int32) *int32 { return &v }
+
+// Int64 stores v in a new int64 value and returns a pointer to it.
+func Int64(v int64) *int64 { return &v }
+
+// Uint32 stores v in a new uint32 value and returns a pointer to it.
+func Uint32(v uint32) *uint32 { return &v }
+
+// Uint64 stores v in a new uint64 value and returns a pointer to it.
+func Uint64(v uint64) *uint64 { return &v }
+
+// Float32 stores v in a new float32 value and returns a pointer to it.
+func Float32(v float32) *float32 { return &v }
+
+// Float64 stores v in a new float64 value and returns a pointer to it.
+func Float64(v float64) *float64 { return &v }
+
+// String stores v in a new string value and returns a pointer to it.
+func String(v string) *string { return &v }
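+
+// Illustrative usage (a sketch; pb.Profile is a hypothetical proto2 message
+// whose optional scalar fields are generated as pointer types):
+//
+// p := &pb.Profile{
+//   Name: proto.String("alice"),
+//   Age:  proto.Int32(30),
+// }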
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
new file mode 100644
index 0000000..63dc057
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
@@ -0,0 +1,200 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
+
+package descriptor
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+)
+
+// Symbols defined in public import of google/protobuf/descriptor.proto.
+
+type FieldDescriptorProto_Type = descriptorpb.FieldDescriptorProto_Type
+
+const FieldDescriptorProto_TYPE_DOUBLE = descriptorpb.FieldDescriptorProto_TYPE_DOUBLE
+const FieldDescriptorProto_TYPE_FLOAT = descriptorpb.FieldDescriptorProto_TYPE_FLOAT
+const FieldDescriptorProto_TYPE_INT64 = descriptorpb.FieldDescriptorProto_TYPE_INT64
+const FieldDescriptorProto_TYPE_UINT64 = descriptorpb.FieldDescriptorProto_TYPE_UINT64
+const FieldDescriptorProto_TYPE_INT32 = descriptorpb.FieldDescriptorProto_TYPE_INT32
+const FieldDescriptorProto_TYPE_FIXED64 = descriptorpb.FieldDescriptorProto_TYPE_FIXED64
+const FieldDescriptorProto_TYPE_FIXED32 = descriptorpb.FieldDescriptorProto_TYPE_FIXED32
+const FieldDescriptorProto_TYPE_BOOL = descriptorpb.FieldDescriptorProto_TYPE_BOOL
+const FieldDescriptorProto_TYPE_STRING = descriptorpb.FieldDescriptorProto_TYPE_STRING
+const FieldDescriptorProto_TYPE_GROUP = descriptorpb.FieldDescriptorProto_TYPE_GROUP
+const FieldDescriptorProto_TYPE_MESSAGE = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE
+const FieldDescriptorProto_TYPE_BYTES = descriptorpb.FieldDescriptorProto_TYPE_BYTES
+const FieldDescriptorProto_TYPE_UINT32 = descriptorpb.FieldDescriptorProto_TYPE_UINT32
+const FieldDescriptorProto_TYPE_ENUM = descriptorpb.FieldDescriptorProto_TYPE_ENUM
+const FieldDescriptorProto_TYPE_SFIXED32 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED32
+const FieldDescriptorProto_TYPE_SFIXED64 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED64
+const FieldDescriptorProto_TYPE_SINT32 = descriptorpb.FieldDescriptorProto_TYPE_SINT32
+const FieldDescriptorProto_TYPE_SINT64 = descriptorpb.FieldDescriptorProto_TYPE_SINT64
+
+var FieldDescriptorProto_Type_name = descriptorpb.FieldDescriptorProto_Type_name
+var FieldDescriptorProto_Type_value = descriptorpb.FieldDescriptorProto_Type_value
+
+type FieldDescriptorProto_Label = descriptorpb.FieldDescriptorProto_Label
+
+const FieldDescriptorProto_LABEL_OPTIONAL = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL
+const FieldDescriptorProto_LABEL_REQUIRED = descriptorpb.FieldDescriptorProto_LABEL_REQUIRED
+const FieldDescriptorProto_LABEL_REPEATED = descriptorpb.FieldDescriptorProto_LABEL_REPEATED
+
+var FieldDescriptorProto_Label_name = descriptorpb.FieldDescriptorProto_Label_name
+var FieldDescriptorProto_Label_value = descriptorpb.FieldDescriptorProto_Label_value
+
+type FileOptions_OptimizeMode = descriptorpb.FileOptions_OptimizeMode
+
+const FileOptions_SPEED = descriptorpb.FileOptions_SPEED
+const FileOptions_CODE_SIZE = descriptorpb.FileOptions_CODE_SIZE
+const FileOptions_LITE_RUNTIME = descriptorpb.FileOptions_LITE_RUNTIME
+
+var FileOptions_OptimizeMode_name = descriptorpb.FileOptions_OptimizeMode_name
+var FileOptions_OptimizeMode_value = descriptorpb.FileOptions_OptimizeMode_value
+
+type FieldOptions_CType = descriptorpb.FieldOptions_CType
+
+const FieldOptions_STRING = descriptorpb.FieldOptions_STRING
+const FieldOptions_CORD = descriptorpb.FieldOptions_CORD
+const FieldOptions_STRING_PIECE = descriptorpb.FieldOptions_STRING_PIECE
+
+var FieldOptions_CType_name = descriptorpb.FieldOptions_CType_name
+var FieldOptions_CType_value = descriptorpb.FieldOptions_CType_value
+
+type FieldOptions_JSType = descriptorpb.FieldOptions_JSType
+
+const FieldOptions_JS_NORMAL = descriptorpb.FieldOptions_JS_NORMAL
+const FieldOptions_JS_STRING = descriptorpb.FieldOptions_JS_STRING
+const FieldOptions_JS_NUMBER = descriptorpb.FieldOptions_JS_NUMBER
+
+var FieldOptions_JSType_name = descriptorpb.FieldOptions_JSType_name
+var FieldOptions_JSType_value = descriptorpb.FieldOptions_JSType_value
+
+type MethodOptions_IdempotencyLevel = descriptorpb.MethodOptions_IdempotencyLevel
+
+const MethodOptions_IDEMPOTENCY_UNKNOWN = descriptorpb.MethodOptions_IDEMPOTENCY_UNKNOWN
+const MethodOptions_NO_SIDE_EFFECTS = descriptorpb.MethodOptions_NO_SIDE_EFFECTS
+const MethodOptions_IDEMPOTENT = descriptorpb.MethodOptions_IDEMPOTENT
+
+var MethodOptions_IdempotencyLevel_name = descriptorpb.MethodOptions_IdempotencyLevel_name
+var MethodOptions_IdempotencyLevel_value = descriptorpb.MethodOptions_IdempotencyLevel_value
+
+type FileDescriptorSet = descriptorpb.FileDescriptorSet
+type FileDescriptorProto = descriptorpb.FileDescriptorProto
+type DescriptorProto = descriptorpb.DescriptorProto
+type ExtensionRangeOptions = descriptorpb.ExtensionRangeOptions
+type FieldDescriptorProto = descriptorpb.FieldDescriptorProto
+type OneofDescriptorProto = descriptorpb.OneofDescriptorProto
+type EnumDescriptorProto = descriptorpb.EnumDescriptorProto
+type EnumValueDescriptorProto = descriptorpb.EnumValueDescriptorProto
+type ServiceDescriptorProto = descriptorpb.ServiceDescriptorProto
+type MethodDescriptorProto = descriptorpb.MethodDescriptorProto
+
+const Default_MethodDescriptorProto_ClientStreaming = descriptorpb.Default_MethodDescriptorProto_ClientStreaming
+const Default_MethodDescriptorProto_ServerStreaming = descriptorpb.Default_MethodDescriptorProto_ServerStreaming
+
+type FileOptions = descriptorpb.FileOptions
+
+const Default_FileOptions_JavaMultipleFiles = descriptorpb.Default_FileOptions_JavaMultipleFiles
+const Default_FileOptions_JavaStringCheckUtf8 = descriptorpb.Default_FileOptions_JavaStringCheckUtf8
+const Default_FileOptions_OptimizeFor = descriptorpb.Default_FileOptions_OptimizeFor
+const Default_FileOptions_CcGenericServices = descriptorpb.Default_FileOptions_CcGenericServices
+const Default_FileOptions_JavaGenericServices = descriptorpb.Default_FileOptions_JavaGenericServices
+const Default_FileOptions_PyGenericServices = descriptorpb.Default_FileOptions_PyGenericServices
+const Default_FileOptions_PhpGenericServices = descriptorpb.Default_FileOptions_PhpGenericServices
+const Default_FileOptions_Deprecated = descriptorpb.Default_FileOptions_Deprecated
+const Default_FileOptions_CcEnableArenas = descriptorpb.Default_FileOptions_CcEnableArenas
+
+type MessageOptions = descriptorpb.MessageOptions
+
+const Default_MessageOptions_MessageSetWireFormat = descriptorpb.Default_MessageOptions_MessageSetWireFormat
+const Default_MessageOptions_NoStandardDescriptorAccessor = descriptorpb.Default_MessageOptions_NoStandardDescriptorAccessor
+const Default_MessageOptions_Deprecated = descriptorpb.Default_MessageOptions_Deprecated
+
+type FieldOptions = descriptorpb.FieldOptions
+
+const Default_FieldOptions_Ctype = descriptorpb.Default_FieldOptions_Ctype
+const Default_FieldOptions_Jstype = descriptorpb.Default_FieldOptions_Jstype
+const Default_FieldOptions_Lazy = descriptorpb.Default_FieldOptions_Lazy
+const Default_FieldOptions_Deprecated = descriptorpb.Default_FieldOptions_Deprecated
+const Default_FieldOptions_Weak = descriptorpb.Default_FieldOptions_Weak
+
+type OneofOptions = descriptorpb.OneofOptions
+type EnumOptions = descriptorpb.EnumOptions
+
+const Default_EnumOptions_Deprecated = descriptorpb.Default_EnumOptions_Deprecated
+
+type EnumValueOptions = descriptorpb.EnumValueOptions
+
+const Default_EnumValueOptions_Deprecated = descriptorpb.Default_EnumValueOptions_Deprecated
+
+type ServiceOptions = descriptorpb.ServiceOptions
+
+const Default_ServiceOptions_Deprecated = descriptorpb.Default_ServiceOptions_Deprecated
+
+type MethodOptions = descriptorpb.MethodOptions
+
+const Default_MethodOptions_Deprecated = descriptorpb.Default_MethodOptions_Deprecated
+const Default_MethodOptions_IdempotencyLevel = descriptorpb.Default_MethodOptions_IdempotencyLevel
+
+type UninterpretedOption = descriptorpb.UninterpretedOption
+type SourceCodeInfo = descriptorpb.SourceCodeInfo
+type GeneratedCodeInfo = descriptorpb.GeneratedCodeInfo
+type DescriptorProto_ExtensionRange = descriptorpb.DescriptorProto_ExtensionRange
+type DescriptorProto_ReservedRange = descriptorpb.DescriptorProto_ReservedRange
+type EnumDescriptorProto_EnumReservedRange = descriptorpb.EnumDescriptorProto_EnumReservedRange
+type UninterpretedOption_NamePart = descriptorpb.UninterpretedOption_NamePart
+type SourceCodeInfo_Location = descriptorpb.SourceCodeInfo_Location
+type GeneratedCodeInfo_Annotation = descriptorpb.GeneratedCodeInfo_Annotation
+
+var File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto protoreflect.FileDescriptor
+
+var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = []byte{
+ 0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68,
+ 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65,
+ 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3b,
+ 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x32,
+}
+
+var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() }
+func file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() {
+ if File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes,
+ DependencyIndexes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs,
+ }.Build()
+ File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto = out.File
+ file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = nil
+ file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = nil
+ file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
new file mode 100644
index 0000000..85f9f57
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any.go
@@ -0,0 +1,179 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ptypes
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+
+ anypb "github.com/golang/protobuf/ptypes/any"
+)
+
+const urlPrefix = "type.googleapis.com/"
+
+// AnyMessageName returns the message name contained in an anypb.Any message.
+// Most type assertions should use the Is function instead.
+//
+// Deprecated: Call the any.MessageName method instead.
+func AnyMessageName(any *anypb.Any) (string, error) {
+ name, err := anyMessageName(any)
+ return string(name), err
+}
+func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
+ if any == nil {
+ return "", fmt.Errorf("message is nil")
+ }
+ name := protoreflect.FullName(any.TypeUrl)
+ if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 {
+ name = name[i+len("/"):]
+ }
+ if !name.IsValid() {
+ return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
+ }
+ return name, nil
+}
+
+// MarshalAny marshals the given message m into an anypb.Any message.
+//
+// Deprecated: Call the anypb.New function instead.
+func MarshalAny(m proto.Message) (*anypb.Any, error) {
+ switch dm := m.(type) {
+ case DynamicAny:
+ m = dm.Message
+ case *DynamicAny:
+ if dm == nil {
+ return nil, proto.ErrNil
+ }
+ m = dm.Message
+ }
+ b, err := proto.Marshal(m)
+ if err != nil {
+ return nil, err
+ }
+ return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil
+}
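+
+// Illustrative round trip through Any (a sketch; pb.Foo is a hypothetical
+// message type):
+//
+// a, err := ptypes.MarshalAny(&pb.Foo{})
+// if err != nil { /* handle error */ }
+// var out pb.Foo
+// if err := ptypes.UnmarshalAny(a, &out); err != nil { /* handle error */ }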
+
+// Empty returns a new message of the type specified in an anypb.Any message.
+// It returns protoregistry.NotFound if the corresponding message type could not
+// be resolved in the global registry.
+//
+// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead
+// to resolve the message name and create a new instance of it.
+func Empty(any *anypb.Any) (proto.Message, error) {
+ name, err := anyMessageName(any)
+ if err != nil {
+ return nil, err
+ }
+ mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
+ if err != nil {
+ return nil, err
+ }
+ return proto.MessageV1(mt.New().Interface()), nil
+}
+
+// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
+// into the provided message m. It returns an error if the target message
+// does not match the type in the Any message or if an unmarshal error occurs.
+//
+// The target message m may be a *DynamicAny message. If the underlying message
+// type could not be resolved, then this returns protoregistry.NotFound.
+//
+// Deprecated: Call the any.UnmarshalTo method instead.
+func UnmarshalAny(any *anypb.Any, m proto.Message) error {
+ if dm, ok := m.(*DynamicAny); ok {
+ if dm.Message == nil {
+ var err error
+ dm.Message, err = Empty(any)
+ if err != nil {
+ return err
+ }
+ }
+ m = dm.Message
+ }
+
+ anyName, err := AnyMessageName(any)
+ if err != nil {
+ return err
+ }
+ msgName := proto.MessageName(m)
+ if anyName != msgName {
+ return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
+ }
+ return proto.Unmarshal(any.Value, m)
+}
+
+// Is reports whether the Any message contains a message of the specified type.
+//
+// Deprecated: Call the any.MessageIs method instead.
+func Is(any *anypb.Any, m proto.Message) bool {
+ if any == nil || m == nil {
+ return false
+ }
+ name := proto.MessageName(m)
+ if !strings.HasSuffix(any.TypeUrl, name) {
+ return false
+ }
+ return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/'
+}
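+
+// Illustrative check (a sketch; pb.Foo is a hypothetical message type):
+//
+// if ptypes.Is(a, &pb.Foo{}) {
+//   // a carries a serialized pb.Foo
+// }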
+
+// DynamicAny is a value that can be passed to UnmarshalAny to automatically
+// allocate a proto.Message for the type specified in an anypb.Any message.
+// The allocated message is stored in the embedded proto.Message.
+//
+// Example:
+// var x ptypes.DynamicAny
+// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
+// fmt.Printf("unmarshaled message: %v", x.Message)
+//
+// Deprecated: Use the any.UnmarshalNew method instead to unmarshal
+// the any message contents into a new instance of the underlying message.
+type DynamicAny struct{ proto.Message }
+
+func (m DynamicAny) String() string {
+ if m.Message == nil {
+ return ""
+ }
+ return m.Message.String()
+}
+func (m DynamicAny) Reset() {
+ if m.Message == nil {
+ return
+ }
+ m.Message.Reset()
+}
+func (m DynamicAny) ProtoMessage() {}
+func (m DynamicAny) ProtoReflect() protoreflect.Message {
+ if m.Message == nil {
+ return nil
+ }
+ return dynamicAny{proto.MessageReflect(m.Message)}
+}
+
+type dynamicAny struct{ protoreflect.Message }
+
+func (m dynamicAny) Type() protoreflect.MessageType {
+ return dynamicAnyType{m.Message.Type()}
+}
+func (m dynamicAny) New() protoreflect.Message {
+ return dynamicAnyType{m.Message.Type()}.New()
+}
+func (m dynamicAny) Interface() protoreflect.ProtoMessage {
+ return DynamicAny{proto.MessageV1(m.Message.Interface())}
+}
+
+type dynamicAnyType struct{ protoreflect.MessageType }
+
+func (t dynamicAnyType) New() protoreflect.Message {
+ return dynamicAny{t.MessageType.New()}
+}
+func (t dynamicAnyType) Zero() protoreflect.Message {
+ return dynamicAny{t.MessageType.Zero()}
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
new file mode 100644
index 0000000..0ef27d3
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -0,0 +1,62 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/golang/protobuf/ptypes/any/any.proto
+
+package any
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ reflect "reflect"
+)
+
+// Symbols defined in public import of google/protobuf/any.proto.
+
+type Any = anypb.Any
+
+var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor
+
+var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{
+ 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29,
+ 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
+ 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65,
+ 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x33,
+}
+
+var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() }
+func file_github_com_golang_protobuf_ptypes_any_any_proto_init() {
+ if File_github_com_golang_protobuf_ptypes_any_any_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes,
+ DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs,
+ }.Build()
+ File_github_com_golang_protobuf_ptypes_any_any_proto = out.File
+ file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil
+ file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil
+ file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go
new file mode 100644
index 0000000..d3c3325
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/doc.go
@@ -0,0 +1,10 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ptypes provides functionality for interacting with well-known types.
+//
+// Deprecated: Well-known types have specialized functionality directly
+// injected into the generated packages for each message type.
+// See the deprecation notice for each function for the suggested alternative.
+package ptypes
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go
new file mode 100644
index 0000000..b2b55dd
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration.go
@@ -0,0 +1,76 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ptypes
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ durationpb "github.com/golang/protobuf/ptypes/duration"
+)
+
+// Range of google.protobuf.Duration as specified in duration.proto.
+// This is about 10,000 years in seconds.
+const (
+ maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
+ minSeconds = -maxSeconds
+)
+
+// Duration converts a durationpb.Duration to a time.Duration.
+// Duration returns an error if dur is invalid or overflows a time.Duration.
+//
+// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead.
+func Duration(dur *durationpb.Duration) (time.Duration, error) {
+ if err := validateDuration(dur); err != nil {
+ return 0, err
+ }
+ d := time.Duration(dur.Seconds) * time.Second
+ if int64(d/time.Second) != dur.Seconds {
+ return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
+ }
+ if dur.Nanos != 0 {
+ d += time.Duration(dur.Nanos) * time.Nanosecond
+ if (d < 0) != (dur.Nanos < 0) {
+ return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
+ }
+ }
+ return d, nil
+}
+
+// DurationProto converts a time.Duration to a durationpb.Duration.
+//
+// Deprecated: Call the durationpb.New function instead.
+func DurationProto(d time.Duration) *durationpb.Duration {
+ nanos := d.Nanoseconds()
+ secs := nanos / 1e9
+ nanos -= secs * 1e9
+ return &durationpb.Duration{
+ Seconds: int64(secs),
+ Nanos: int32(nanos),
+ }
+}
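+
+// Illustrative conversion in both directions (a sketch):
+//
+// dpb := ptypes.DurationProto(90 * time.Second) // Seconds: 90, Nanos: 0
+// d, err := ptypes.Duration(dpb)                // d == 90*time.Second
+// if err != nil { /* handle error */ }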
+
+// validateDuration determines whether the durationpb.Duration is valid
+// according to the definition in google/protobuf/duration.proto.
+// A valid durationpb.Duration may still be too large to fit into a time.Duration.
+// Note that the range of durationpb.Duration is about 10,000 years,
+// while the range of time.Duration is about 290 years.
+func validateDuration(dur *durationpb.Duration) error {
+ if dur == nil {
+ return errors.New("duration: nil Duration")
+ }
+ if dur.Seconds < minSeconds || dur.Seconds > maxSeconds {
+ return fmt.Errorf("duration: %v: seconds out of range", dur)
+ }
+ if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 {
+ return fmt.Errorf("duration: %v: nanos out of range", dur)
+ }
+ // Seconds and Nanos must have the same sign, unless dur.Nanos is zero.
+ if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) {
+ return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur)
+ }
+ return nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
new file mode 100644
index 0000000..d0079ee
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
@@ -0,0 +1,63 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/golang/protobuf/ptypes/duration/duration.proto
+
+package duration
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ reflect "reflect"
+)
+
+// Symbols defined in public import of google/protobuf/duration.proto.
+
+type Duration = durationpb.Duration
+
+var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor
+
+var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{
+ 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
+ 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
+ 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() }
+func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() {
+ if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
+ DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
+ }.Build()
+ File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
+ file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
+ file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
+ file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
new file mode 100644
index 0000000..8368a3f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
@@ -0,0 +1,112 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ptypes
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ timestamppb "github.com/golang/protobuf/ptypes/timestamp"
+)
+
+// Range of google.protobuf.Timestamp as specified in timestamp.proto.
+const (
+ // Seconds field of the earliest valid Timestamp.
+ // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+ minValidSeconds = -62135596800
+ // Seconds field just after the latest valid Timestamp.
+ // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+ maxValidSeconds = 253402300800
+)
+
+// Timestamp converts a timestamppb.Timestamp to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return
+// value is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+//
+// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead.
+func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
+ // Don't return the zero value on error, because it corresponds to a valid
+ // timestamp. Instead return whatever time.Unix gives us.
+ var t time.Time
+ if ts == nil {
+ t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+ } else {
+ t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+ }
+ return t, validateTimestamp(ts)
+}
+
+// TimestampNow returns a google.protobuf.Timestamp for the current time.
+//
+// Deprecated: Call the timestamppb.Now function instead.
+func TimestampNow() *timestamppb.Timestamp {
+ ts, err := TimestampProto(time.Now())
+ if err != nil {
+ panic("ptypes: time.Now() out of Timestamp range")
+ }
+ return ts
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+//
+// Deprecated: Call the timestamppb.New function instead.
+func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
+ ts := &timestamppb.Timestamp{
+ Seconds: t.Unix(),
+ Nanos: int32(t.Nanosecond()),
+ }
+ if err := validateTimestamp(ts); err != nil {
+ return nil, err
+ }
+ return ts, nil
+}
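+
+// Illustrative round trip (a sketch):
+//
+// ts, err := ptypes.TimestampProto(time.Now())
+// if err != nil { /* handle error */ }
+// t, err := ptypes.Timestamp(ts) // t is the same instant, in UTC
+// if err != nil { /* handle error */ }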
+
+// TimestampString returns the RFC 3339 string for valid Timestamps.
+// For invalid Timestamps, it returns an error message in parentheses.
+//
+// Deprecated: Call the ts.AsTime method instead,
+// followed by a call to the Format method on the time.Time value.
+func TimestampString(ts *timestamppb.Timestamp) string {
+ t, err := Timestamp(ts)
+ if err != nil {
+ return fmt.Sprintf("(%v)", err)
+ }
+ return t.Format(time.RFC3339Nano)
+}
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
+// and has a Nanos field in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes the problem.
+//
+// Every valid Timestamp can be represented by a time.Time,
+// but the converse is not true.
+func validateTimestamp(ts *timestamppb.Timestamp) error {
+ if ts == nil {
+ return errors.New("timestamp: nil Timestamp")
+ }
+ if ts.Seconds < minValidSeconds {
+ return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
+ }
+ if ts.Seconds >= maxValidSeconds {
+ return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
+ }
+ if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+ return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
+ }
+ return nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
new file mode 100644
index 0000000..a76f807
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -0,0 +1,64 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
+
+package timestamp
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+)
+
+// Symbols defined in public import of google/protobuf/timestamp.proto.
+
+type Timestamp = timestamppb.Timestamp
+
+var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor
+
+var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{
+ 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37,
+ 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() }
+func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() {
+ if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes,
+ DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs,
+ }.Build()
+ File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File
+ file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil
+ file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil
+ file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore
new file mode 100644
index 0000000..042091d
--- /dev/null
+++ b/vendor/github.com/golang/snappy/.gitignore
@@ -0,0 +1,16 @@
+cmd/snappytool/snappytool
+testdata/bench
+
+# These explicitly listed benchmark data files are for an obsolete version of
+# snappy_test.go.
+testdata/alice29.txt
+testdata/asyoulik.txt
+testdata/fireworks.jpeg
+testdata/geo.protodata
+testdata/html
+testdata/html_x_4
+testdata/kppkn.gtb
+testdata/lcet10.txt
+testdata/paper-100k.pdf
+testdata/plrabn12.txt
+testdata/urls.10K
diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS
new file mode 100644
index 0000000..bcfa195
--- /dev/null
+++ b/vendor/github.com/golang/snappy/AUTHORS
@@ -0,0 +1,15 @@
+# This is the official list of Snappy-Go authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+# Name or Organization
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Damian Gryski
+Google Inc.
+Jan Mercl <0xjnml@gmail.com>
+Rodolfo Carvalho
+Sebastien Binet
diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS
new file mode 100644
index 0000000..931ae31
--- /dev/null
+++ b/vendor/github.com/golang/snappy/CONTRIBUTORS
@@ -0,0 +1,37 @@
+# This is the official list of people who can contribute
+# (and typically have contributed) code to the Snappy-Go repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+# http://code.google.com/legal/individual-cla-v1.0.html
+# http://code.google.com/legal/corporate-cla-v1.0.html
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+
+# Names should be added to this file like so:
+# Name
+
+# Please keep the list sorted.
+
+Damian Gryski
+Jan Mercl <0xjnml@gmail.com>
+Kai Backman
+Marc-Antoine Ruel
+Nigel Tao
+Rob Pike
+Rodolfo Carvalho
+Russ Cox
+Sebastien Binet
diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE
new file mode 100644
index 0000000..6050c10
--- /dev/null
+++ b/vendor/github.com/golang/snappy/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README
new file mode 100644
index 0000000..cea1287
--- /dev/null
+++ b/vendor/github.com/golang/snappy/README
@@ -0,0 +1,107 @@
+The Snappy compression format in the Go programming language.
+
+To download and install from source:
+$ go get github.com/golang/snappy
+
+Unless otherwise noted, the Snappy-Go source files are distributed
+under the BSD-style license found in the LICENSE file.
+
+
+
+Benchmarks.
+
+The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
+or so files, the same set used by the C++ Snappy code (github.com/google/snappy
+and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
+3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29:
+
+"go test -test.bench=."
+
+_UFlat0-8 2.19GB/s ± 0% html
+_UFlat1-8 1.41GB/s ± 0% urls
+_UFlat2-8 23.5GB/s ± 2% jpg
+_UFlat3-8 1.91GB/s ± 0% jpg_200
+_UFlat4-8 14.0GB/s ± 1% pdf
+_UFlat5-8 1.97GB/s ± 0% html4
+_UFlat6-8 814MB/s ± 0% txt1
+_UFlat7-8 785MB/s ± 0% txt2
+_UFlat8-8 857MB/s ± 0% txt3
+_UFlat9-8 719MB/s ± 1% txt4
+_UFlat10-8 2.84GB/s ± 0% pb
+_UFlat11-8 1.05GB/s ± 0% gaviota
+
+_ZFlat0-8 1.04GB/s ± 0% html
+_ZFlat1-8 534MB/s ± 0% urls
+_ZFlat2-8 15.7GB/s ± 1% jpg
+_ZFlat3-8 740MB/s ± 3% jpg_200
+_ZFlat4-8 9.20GB/s ± 1% pdf
+_ZFlat5-8 991MB/s ± 0% html4
+_ZFlat6-8 379MB/s ± 0% txt1
+_ZFlat7-8 352MB/s ± 0% txt2
+_ZFlat8-8 396MB/s ± 1% txt3
+_ZFlat9-8 327MB/s ± 1% txt4
+_ZFlat10-8 1.33GB/s ± 1% pb
+_ZFlat11-8 605MB/s ± 1% gaviota
+
+
+
+"go test -test.bench=. -tags=noasm"
+
+_UFlat0-8 621MB/s ± 2% html
+_UFlat1-8 494MB/s ± 1% urls
+_UFlat2-8 23.2GB/s ± 1% jpg
+_UFlat3-8 1.12GB/s ± 1% jpg_200
+_UFlat4-8 4.35GB/s ± 1% pdf
+_UFlat5-8 609MB/s ± 0% html4
+_UFlat6-8 296MB/s ± 0% txt1
+_UFlat7-8 288MB/s ± 0% txt2
+_UFlat8-8 309MB/s ± 1% txt3
+_UFlat9-8 280MB/s ± 1% txt4
+_UFlat10-8 753MB/s ± 0% pb
+_UFlat11-8 400MB/s ± 0% gaviota
+
+_ZFlat0-8 409MB/s ± 1% html
+_ZFlat1-8 250MB/s ± 1% urls
+_ZFlat2-8 12.3GB/s ± 1% jpg
+_ZFlat3-8 132MB/s ± 0% jpg_200
+_ZFlat4-8 2.92GB/s ± 0% pdf
+_ZFlat5-8 405MB/s ± 1% html4
+_ZFlat6-8 179MB/s ± 1% txt1
+_ZFlat7-8 170MB/s ± 1% txt2
+_ZFlat8-8 189MB/s ± 1% txt3
+_ZFlat9-8 164MB/s ± 1% txt4
+_ZFlat10-8 479MB/s ± 1% pb
+_ZFlat11-8 270MB/s ± 1% gaviota
+
+
+
+For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
+are the numbers from C++ Snappy's
+
+make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log
+
+BM_UFlat/0 2.4GB/s html
+BM_UFlat/1 1.4GB/s urls
+BM_UFlat/2 21.8GB/s jpg
+BM_UFlat/3 1.5GB/s jpg_200
+BM_UFlat/4 13.3GB/s pdf
+BM_UFlat/5 2.1GB/s html4
+BM_UFlat/6 1.0GB/s txt1
+BM_UFlat/7 959.4MB/s txt2
+BM_UFlat/8 1.0GB/s txt3
+BM_UFlat/9 864.5MB/s txt4
+BM_UFlat/10 2.9GB/s pb
+BM_UFlat/11 1.2GB/s gaviota
+
+BM_ZFlat/0 944.3MB/s html (22.31 %)
+BM_ZFlat/1 501.6MB/s urls (47.78 %)
+BM_ZFlat/2 14.3GB/s jpg (99.95 %)
+BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %)
+BM_ZFlat/4 8.3GB/s pdf (83.30 %)
+BM_ZFlat/5 903.5MB/s html4 (22.52 %)
+BM_ZFlat/6 336.0MB/s txt1 (57.88 %)
+BM_ZFlat/7 312.3MB/s txt2 (61.91 %)
+BM_ZFlat/8 353.1MB/s txt3 (54.99 %)
+BM_ZFlat/9 289.9MB/s txt4 (66.26 %)
+BM_ZFlat/10 1.2GB/s pb (19.68 %)
+BM_ZFlat/11 527.4MB/s gaviota (37.72 %)
diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go
new file mode 100644
index 0000000..72efb03
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode.go
@@ -0,0 +1,237 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+var (
+ // ErrCorrupt reports that the input is invalid.
+ ErrCorrupt = errors.New("snappy: corrupt input")
+ // ErrTooLarge reports that the uncompressed length is too large.
+ ErrTooLarge = errors.New("snappy: decoded block is too large")
+ // ErrUnsupported reports that the input isn't supported.
+ ErrUnsupported = errors.New("snappy: unsupported input")
+
+ errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
+)
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+ v, _, err := decodedLen(src)
+ return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+ v, n := binary.Uvarint(src)
+ if n <= 0 || v > 0xffffffff {
+ return 0, 0, ErrCorrupt
+ }
+
+ const wordSize = 32 << (^uint(0) >> 32 & 1)
+ if wordSize == 32 && v > 0x7fffffff {
+ return 0, 0, ErrTooLarge
+ }
+ return int(v), n, nil
+}
+
+const (
+ decodeErrCodeCorrupt = 1
+ decodeErrCodeUnsupportedLiteralLength = 2
+)
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Decode(dst, src []byte) ([]byte, error) {
+ dLen, s, err := decodedLen(src)
+ if err != nil {
+ return nil, err
+ }
+ if dLen <= len(dst) {
+ dst = dst[:dLen]
+ } else {
+ dst = make([]byte, dLen)
+ }
+ switch decode(dst, src[s:]) {
+ case 0:
+ return dst, nil
+ case decodeErrCodeUnsupportedLiteralLength:
+ return nil, errUnsupportedLiteralLength
+ }
+ return nil, ErrCorrupt
+}
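+
+// Illustrative block-format round trip (a sketch; Encode is defined in this
+// package's encode.go, which is not part of this diff):
+//
+// compressed := snappy.Encode(nil, []byte("hello, snappy"))
+// decoded, err := snappy.Decode(nil, compressed)
+// if err != nil { /* handle error */ }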
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func NewReader(r io.Reader) *Reader {
+ return &Reader{
+ r: r,
+ decoded: make([]byte, maxBlockSize),
+ buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
+ }
+}
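+
+// Illustrative streaming use (a sketch; src is any io.Reader carrying data in
+// the framing format, e.g. produced by this package's Writer):
+//
+// sr := snappy.NewReader(src)
+// if _, err := io.Copy(os.Stdout, sr); err != nil { /* handle error */ }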
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+type Reader struct {
+ r io.Reader
+ err error
+ decoded []byte
+ buf []byte
+ // decoded[i:j] contains decoded bytes that have not yet been passed on.
+ i, j int
+ readHeader bool
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one.
+func (r *Reader) Reset(reader io.Reader) {
+ r.r = reader
+ r.err = nil
+ r.i = 0
+ r.j = 0
+ r.readHeader = false
+}
+
+func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
+ if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+ if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+ r.err = ErrCorrupt
+ }
+ return false
+ }
+ return true
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+ if r.err != nil {
+ return 0, r.err
+ }
+ for {
+ if r.i < r.j {
+ n := copy(p, r.decoded[r.i:r.j])
+ r.i += n
+ return n, nil
+ }
+ if !r.readFull(r.buf[:4], true) {
+ return 0, r.err
+ }
+ chunkType := r.buf[0]
+ if !r.readHeader {
+ if chunkType != chunkTypeStreamIdentifier {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.readHeader = true
+ }
+ chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+ if chunkLen > len(r.buf) {
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+
+ // The chunk types are specified at
+ // https://github.com/google/snappy/blob/master/framing_format.txt
+ switch chunkType {
+ case chunkTypeCompressedData:
+ // Section 4.2. Compressed data (chunk type 0x00).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ buf := r.buf[:chunkLen]
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ buf = buf[checksumSize:]
+
+ n, err := DecodedLen(buf)
+ if err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if n > len(r.decoded) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if _, err := Decode(r.decoded, buf); err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if crc(r.decoded[:n]) != checksum {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeUncompressedData:
+ // Section 4.3. Uncompressed data (chunk type 0x01).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ buf := r.buf[:checksumSize]
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ // Read directly into r.decoded instead of via r.buf.
+ n := chunkLen - checksumSize
+ if n > len(r.decoded) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.readFull(r.decoded[:n], false) {
+ return 0, r.err
+ }
+ if crc(r.decoded[:n]) != checksum {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeStreamIdentifier:
+ // Section 4.1. Stream identifier (chunk type 0xff).
+ if chunkLen != len(magicBody) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.readFull(r.buf[:len(magicBody)], false) {
+ return 0, r.err
+ }
+ for i := 0; i < len(magicBody); i++ {
+ if r.buf[i] != magicBody[i] {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ }
+ continue
+ }
+
+ if chunkType <= 0x7f {
+ // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+ // Section 4.4 Padding (chunk type 0xfe).
+ // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+ if !r.readFull(r.buf[:chunkLen], false) {
+ return 0, r.err
+ }
+ }
+}
diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go
new file mode 100644
index 0000000..fcd192b
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.go
@@ -0,0 +1,14 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
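+// Note: the build tags above select this asm-backed decode on amd64 under
+// the gc toolchain; building with "-tags noasm" (or for another GOARCH)
+// falls back to the pure Go decode in decode_other.go.
+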
+// decode has the same semantics as in decode_other.go.
+//
+//go:noescape
+func decode(dst, src []byte) int
diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s
new file mode 100644
index 0000000..e6179f6
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.s
@@ -0,0 +1,490 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The asm code generally follows the pure Go code in decode_other.go, except
+// where marked with a "!!!".
+
+// func decode(dst, src []byte) int
+//
+// All local variables fit into registers. The non-zero stack size is only to
+// spill registers and push args when issuing a CALL. The register allocation:
+// - AX scratch
+// - BX scratch
+// - CX length or x
+// - DX offset
+// - SI &src[s]
+// - DI &dst[d]
+// + R8 dst_base
+// + R9 dst_len
+// + R10 dst_base + dst_len
+// + R11 src_base
+// + R12 src_len
+// + R13 src_base + src_len
+// - R14 used by doCopy
+// - R15 used by doCopy
+//
+// The registers R8-R13 (marked with a "+") are set at the start of the
+// function, and after a CALL returns, and are not otherwise modified.
+//
+// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI.
+// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
+TEXT ·decode(SB), NOSPLIT, $48-56
+ // Initialize SI, DI and R8-R13.
+ MOVQ dst_base+0(FP), R8
+ MOVQ dst_len+8(FP), R9
+ MOVQ R8, DI
+ MOVQ R8, R10
+ ADDQ R9, R10
+ MOVQ src_base+24(FP), R11
+ MOVQ src_len+32(FP), R12
+ MOVQ R11, SI
+ MOVQ R11, R13
+ ADDQ R12, R13
+
+loop:
+ // for s < len(src)
+ CMPQ SI, R13
+ JEQ end
+
+ // CX = uint32(src[s])
+ //
+ // switch src[s] & 0x03
+ MOVBLZX (SI), CX
+ MOVL CX, BX
+ ANDL $3, BX
+ CMPL BX, $1
+ JAE tagCopy
+
+ // ----------------------------------------
+ // The code below handles literal tags.
+
+ // case tagLiteral:
+ // x := uint32(src[s] >> 2)
+ // switch
+ SHRL $2, CX
+ CMPL CX, $60
+ JAE tagLit60Plus
+
+ // case x < 60:
+ // s++
+ INCQ SI
+
+doLit:
+ // This is the end of the inner "switch", when we have a literal tag.
+ //
+ // We assume that CX == x and x fits in a uint32, where x is the variable
+ // used in the pure Go decode_other.go code.
+
+ // length = int(x) + 1
+ //
+ // Unlike the pure Go code, we don't need to check if length <= 0 because
+ // CX can hold 64 bits, so the increment cannot overflow.
+ INCQ CX
+
+ // Prepare to check if copying length bytes will run past the end of dst or
+ // src.
+ //
+ // AX = len(dst) - d
+ // BX = len(src) - s
+ MOVQ R10, AX
+ SUBQ DI, AX
+ MOVQ R13, BX
+ SUBQ SI, BX
+
+ // !!! Try a faster technique for short (16 or fewer bytes) copies.
+ //
+ // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
+ // goto callMemmove // Fall back on calling runtime·memmove.
+ // }
+ //
+ // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
+ // against 21 instead of 16, because it cannot assume that all of its input
+ // is contiguous in memory and so it needs to leave enough source bytes to
+ // read the next tag without refilling buffers, but Go's Decode assumes
+ // contiguousness (the src argument is a []byte).
+ CMPQ CX, $16
+ JGT callMemmove
+ CMPQ AX, $16
+ JLT callMemmove
+ CMPQ BX, $16
+ JLT callMemmove
+
+ // !!! Implement the copy from src to dst as a 16-byte load and store.
+ // (Decode's documentation says that dst and src must not overlap.)
+ //
+ // This always copies 16 bytes, instead of only length bytes, but that's
+ // OK. If the input is a valid Snappy encoding then subsequent iterations
+ // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
+ // non-nil error), so the overrun will be ignored.
+ //
+ // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+ // 16-byte loads and stores. This technique probably wouldn't be as
+ // effective on architectures that are fussier about alignment.
+ MOVOU 0(SI), X0
+ MOVOU X0, 0(DI)
+
+ // d += length
+ // s += length
+ ADDQ CX, DI
+ ADDQ CX, SI
+ JMP loop
+
+callMemmove:
+ // if length > len(dst)-d || length > len(src)-s { etc }
+ CMPQ CX, AX
+ JGT errCorrupt
+ CMPQ CX, BX
+ JGT errCorrupt
+
+ // copy(dst[d:], src[s:s+length])
+ //
+ // This means calling runtime·memmove(&dst[d], &src[s], length), so we push
+ // DI, SI and CX as arguments. Coincidentally, we also need to spill those
+ // three registers to the stack, to save local variables across the CALL.
+ MOVQ DI, 0(SP)
+ MOVQ SI, 8(SP)
+ MOVQ CX, 16(SP)
+ MOVQ DI, 24(SP)
+ MOVQ SI, 32(SP)
+ MOVQ CX, 40(SP)
+ CALL runtime·memmove(SB)
+
+ // Restore local variables: unspill registers from the stack and
+ // re-calculate R8-R13.
+ MOVQ 24(SP), DI
+ MOVQ 32(SP), SI
+ MOVQ 40(SP), CX
+ MOVQ dst_base+0(FP), R8
+ MOVQ dst_len+8(FP), R9
+ MOVQ R8, R10
+ ADDQ R9, R10
+ MOVQ src_base+24(FP), R11
+ MOVQ src_len+32(FP), R12
+ MOVQ R11, R13
+ ADDQ R12, R13
+
+ // d += length
+ // s += length
+ ADDQ CX, DI
+ ADDQ CX, SI
+ JMP loop
+
+tagLit60Plus:
+ // !!! This fragment does the
+ //
+ // s += x - 58; if uint(s) > uint(len(src)) { etc }
+ //
+ // checks. In the asm version, we code it once instead of once per switch case.
+ ADDQ CX, SI
+ SUBQ $58, SI
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // case x == 60:
+ CMPL CX, $61
+ JEQ tagLit61
+ JA tagLit62Plus
+
+ // x = uint32(src[s-1])
+ MOVBLZX -1(SI), CX
+ JMP doLit
+
+tagLit61:
+ // case x == 61:
+ // x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ MOVWLZX -2(SI), CX
+ JMP doLit
+
+tagLit62Plus:
+ CMPL CX, $62
+ JA tagLit63
+
+ // case x == 62:
+ // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ MOVWLZX -3(SI), CX
+ MOVBLZX -1(SI), BX
+ SHLL $16, BX
+ ORL BX, CX
+ JMP doLit
+
+tagLit63:
+ // case x == 63:
+ // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ MOVL -4(SI), CX
+ JMP doLit
+
+// The code above handles literal tags.
+// ----------------------------------------
+// The code below handles copy tags.
+
+tagCopy4:
+ // case tagCopy4:
+ // s += 5
+ ADDQ $5, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // length = 1 + int(src[s-5])>>2
+ SHRQ $2, CX
+ INCQ CX
+
+ // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ MOVLQZX -4(SI), DX
+ JMP doCopy
+
+tagCopy2:
+ // case tagCopy2:
+ // s += 3
+ ADDQ $3, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // length = 1 + int(src[s-3])>>2
+ SHRQ $2, CX
+ INCQ CX
+
+ // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+ MOVWQZX -2(SI), DX
+ JMP doCopy
+
+tagCopy:
+ // We have a copy tag. We assume that:
+ // - BX == src[s] & 0x03
+ // - CX == src[s]
+ CMPQ BX, $2
+ JEQ tagCopy2
+ JA tagCopy4
+
+ // case tagCopy1:
+ // s += 2
+ ADDQ $2, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+ MOVQ CX, DX
+ ANDQ $0xe0, DX
+ SHLQ $3, DX
+ MOVBQZX -1(SI), BX
+ ORQ BX, DX
+
+ // length = 4 + int(src[s-2])>>2&0x7
+ SHRQ $2, CX
+ ANDQ $7, CX
+ ADDQ $4, CX
+
+doCopy:
+ // This is the end of the outer "switch", when we have a copy tag.
+ //
+ // We assume that:
+ // - CX == length && CX > 0
+ // - DX == offset
+
+ // if offset <= 0 { etc }
+ CMPQ DX, $0
+ JLE errCorrupt
+
+ // if d < offset { etc }
+ MOVQ DI, BX
+ SUBQ R8, BX
+ CMPQ BX, DX
+ JLT errCorrupt
+
+ // if length > len(dst)-d { etc }
+ MOVQ R10, BX
+ SUBQ DI, BX
+ CMPQ CX, BX
+ JGT errCorrupt
+
+ // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+ //
+ // Set:
+ // - R14 = len(dst)-d
+ // - R15 = &dst[d-offset]
+ MOVQ R10, R14
+ SUBQ DI, R14
+ MOVQ DI, R15
+ SUBQ DX, R15
+
+ // !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+ //
+ // First, try using two 8-byte load/stores, similar to the doLit technique
+ // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+ // still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+ // and not one 16-byte load/store, and the first store has to be before the
+ // second load, due to the overlap if offset is in the range [8, 16).
+ //
+ // if length > 16 || offset < 8 || len(dst)-d < 16 {
+ // goto slowForwardCopy
+ // }
+ // copy 16 bytes
+ // d += length
+ CMPQ CX, $16
+ JGT slowForwardCopy
+ CMPQ DX, $8
+ JLT slowForwardCopy
+ CMPQ R14, $16
+ JLT slowForwardCopy
+ MOVQ 0(R15), AX
+ MOVQ AX, 0(DI)
+ MOVQ 8(R15), BX
+ MOVQ BX, 8(DI)
+ ADDQ CX, DI
+ JMP loop
+
+slowForwardCopy:
+ // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+ // can still try 8-byte load stores, provided we can overrun up to 10 extra
+ // bytes. As above, the overrun will be fixed up by subsequent iterations
+ // of the outermost loop.
+ //
+ // The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+ // commentary says:
+ //
+ // ----
+ //
+	// The main part of this loop is a simple copy of eight bytes at a time
+	// until we've copied (at least) the requested amount of bytes. However,
+	// if d and d-offset are less than eight bytes apart (indicating a
+	// repeating pattern of length < 8), we first need to expand the pattern
+	// in order to get the correct results. For instance, if the buffer looks
+	// like this, with the eight-byte <d-offset> and <d> patterns marked as
+	// intervals:
+	//
+	//    abxxxxxxxxxxxx
+	//    [------]           d-offset
+	//      [------]         d
+	//
+	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+	// once, after which we can move <d> two bytes without moving <d-offset>:
+	//
+	//    ababxxxxxxxxxx
+	//    [------]           d-offset
+	//        [------]       d
+	//
+	// and repeat the exercise until the two no longer overlap.
+ //
+ // This allows us to do very well in the special case of one single byte
+ // repeated many times, without taking a big hit for more general cases.
+ //
+ // The worst case of extra writing past the end of the match occurs when
+ // offset == 1 and length == 1; the last copy will read from byte positions
+ // [0..7] and write to [4..11], whereas it was only supposed to write to
+ // position 1. Thus, ten excess bytes.
+ //
+ // ----
+ //
+ // That "10 byte overrun" worst case is confirmed by Go's
+ // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+ // and finishSlowForwardCopy algorithm.
+ //
+ // if length > len(dst)-d-10 {
+ // goto verySlowForwardCopy
+ // }
+ SUBQ $10, R14
+ CMPQ CX, R14
+ JGT verySlowForwardCopy
+
+makeOffsetAtLeast8:
+ // !!! As above, expand the pattern so that offset >= 8 and we can use
+ // 8-byte load/stores.
+ //
+ // for offset < 8 {
+ // copy 8 bytes from dst[d-offset:] to dst[d:]
+ // length -= offset
+ // d += offset
+ // offset += offset
+ // // The two previous lines together means that d-offset, and therefore
+ // // R15, is unchanged.
+ // }
+ CMPQ DX, $8
+ JGE fixUpSlowForwardCopy
+ MOVQ (R15), BX
+ MOVQ BX, (DI)
+ SUBQ DX, CX
+ ADDQ DX, DI
+ ADDQ DX, DX
+ JMP makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+ // !!! Add length (which might be negative now) to d (implied by DI being
+ // &dst[d]) so that d ends up at the right place when we jump back to the
+ // top of the loop. Before we do that, though, we save DI to AX so that, if
+ // length is positive, copying the remaining length bytes will write to the
+ // right place.
+ MOVQ DI, AX
+ ADDQ CX, DI
+
+finishSlowForwardCopy:
+ // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+ // length means that we overrun, but as above, that will be fixed up by
+ // subsequent iterations of the outermost loop.
+ CMPQ CX, $0
+ JLE loop
+ MOVQ (R15), BX
+ MOVQ BX, (AX)
+ ADDQ $8, R15
+ ADDQ $8, AX
+ SUBQ $8, CX
+ JMP finishSlowForwardCopy
+
+verySlowForwardCopy:
+ // verySlowForwardCopy is a simple implementation of forward copy. In C
+ // parlance, this is a do/while loop instead of a while loop, since we know
+ // that length > 0. In Go syntax:
+ //
+ // for {
+ // dst[d] = dst[d - offset]
+ // d++
+ // length--
+ // if length == 0 {
+ // break
+ // }
+ // }
+ MOVB (R15), BX
+ MOVB BX, (DI)
+ INCQ R15
+ INCQ DI
+ DECQ CX
+ JNZ verySlowForwardCopy
+ JMP loop
+
+// The code above handles copy tags.
+// ----------------------------------------
+
+end:
+ // This is the end of the "for s < len(src)".
+ //
+ // if d != len(dst) { etc }
+ CMPQ DI, R10
+ JNE errCorrupt
+
+ // return 0
+ MOVQ $0, ret+48(FP)
+ RET
+
+errCorrupt:
+ // return decodeErrCodeCorrupt
+ MOVQ $1, ret+48(FP)
+ RET
diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go
new file mode 100644
index 0000000..8c9f204
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_other.go
@@ -0,0 +1,101 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+// decode writes the decoding of src to dst. It assumes that the varint-encoded
+// length of the decompressed bytes has already been read, and that len(dst)
+// equals that length.
+//
+// It returns 0 on success or a decodeErrCodeXxx error code on failure.
+func decode(dst, src []byte) int {
+ var d, s, offset, length int
+ for s < len(src) {
+ switch src[s] & 0x03 {
+ case tagLiteral:
+ x := uint32(src[s] >> 2)
+ switch {
+ case x < 60:
+ s++
+ case x == 60:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-1])
+ case x == 61:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ case x == 62:
+ s += 4
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ case x == 63:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ }
+ length = int(x) + 1
+ if length <= 0 {
+ return decodeErrCodeUnsupportedLiteralLength
+ }
+ if length > len(dst)-d || length > len(src)-s {
+ return decodeErrCodeCorrupt
+ }
+ copy(dst[d:], src[s:s+length])
+ d += length
+ s += length
+ continue
+
+ case tagCopy1:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 4 + int(src[s-2])>>2&0x7
+ offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+
+ case tagCopy2:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-3])>>2
+ offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+
+ case tagCopy4:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-5])>>2
+ offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ }
+
+ if offset <= 0 || d < offset || length > len(dst)-d {
+ return decodeErrCodeCorrupt
+ }
+ // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
+ // the built-in copy function, this byte-by-byte copy always runs
+ // forwards, even if the slices overlap. Conceptually, this is:
+ //
+ // d += forwardCopy(dst[d:d+length], dst[d-offset:])
+ for end := d + length; d != end; d++ {
+ dst[d] = dst[d-offset]
+ }
+ }
+ if d != len(dst) {
+ return decodeErrCodeCorrupt
+ }
+ return 0
+}
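+
+// Worked example: the block []byte{0x05, 0x10, 'h', 'e', 'l', 'l', 'o'}
+// decodes to "hello". 0x05 is the varint-encoded decoded length (5), 0x10 is
+// a tagLiteral byte with x == 4 (a 5-byte literal), and the remaining bytes
+// are the literal itself, so d advances to 5 == len(dst) and decode returns 0.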
diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go
new file mode 100644
index 0000000..8d393e9
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode.go
@@ -0,0 +1,285 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Encode(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if len(dst) < n {
+ dst = make([]byte, n)
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ for len(src) > 0 {
+ p := src
+ src = nil
+ if len(p) > maxBlockSize {
+ p, src = p[:maxBlockSize], p[maxBlockSize:]
+ }
+ if len(p) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], p)
+ } else {
+ d += encodeBlock(dst[d:], p)
+ }
+ }
+ return dst[:d]
+}
+
+// inputMargin is the minimum number of extra input bytes to keep, inside
+// encodeBlock's inner loop. On some architectures, this margin lets us
+// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
+// literals can be implemented as a single load to and store from a 16-byte
+// register. That literal's actual length can be as short as 1 byte, so this
+// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
+// the encoding loop will fix up the copy overrun, and this inputMargin ensures
+// that we don't overrun the dst and src buffers.
+const inputMargin = 16 - 1
+
+// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
+// could be encoded with a copy tag. This is the minimum with respect to the
+// algorithm used by encodeBlock, not a minimum enforced by the file format.
+//
+// The encoded output must start with at least a 1 byte literal, as there are
+// no previous bytes to copy. A minimal (1 byte) copy after that, generated
+// from an emitCopy call in encodeBlock's main loop, would require at least
+// another inputMargin bytes, for the reason above: we want any emitLiteral
+// calls inside encodeBlock's main loop to use the fast path if possible, which
+// requires being able to overrun by inputMargin bytes. Thus,
+// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
+//
+// The C++ code doesn't use this exact threshold, but it could, as discussed at
+// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
+// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
+// optimization. It should not affect the encoded form. This is tested by
+// TestSameEncodingAsCppShortCopies.
+const minNonLiteralBlockSize = 1 + 1 + inputMargin
+
+// MaxEncodedLen returns the maximum length of a snappy block, given its
+// uncompressed length.
+//
+// It will return a negative value if srcLen is too large to encode.
+func MaxEncodedLen(srcLen int) int {
+ n := uint64(srcLen)
+ if n > 0xffffffff {
+ return -1
+ }
+ // Compressed data can be defined as:
+ // compressed := item* literal*
+ // item := literal* copy
+ //
+ // The trailing literal sequence has a space blowup of at most 62/60
+ // since a literal of length 60 needs one tag byte + one extra byte
+ // for length information.
+ //
+ // Item blowup is trickier to measure. Suppose the "copy" op copies
+ // 4 bytes of data. Because of a special check in the encoding code,
+ // we produce a 4-byte copy only if the offset is < 65536. Therefore
+ // the copy op takes 3 bytes to encode, and this type of item leads
+ // to at most the 62/60 blowup for representing literals.
+ //
+ // Suppose the "copy" op copies 5 bytes of data. If the offset is big
+ // enough, it will take 5 bytes to encode the copy op. Therefore the
+ // worst case here is a one-byte literal followed by a five-byte copy.
+ // That is, 6 bytes of input turn into 7 bytes of "compressed" data.
+ //
+ // This last factor dominates the blowup, so the final estimate is:
+ n = 32 + n + n/6
+ if n > 0xffffffff {
+ return -1
+ }
+ return int(n)
+}
+
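+// For example, MaxEncodedLen(65536) is 32 + 65536 + 65536/6 = 32 + 65536 +
+// 10922 = 76490, which matches the maxEncodedLenOfMaxBlockSize constant
+// hard-coded in snappy.go.
+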
+var errClosed = errors.New("snappy: Writer is closed")
+
+// NewWriter returns a new Writer that compresses to w.
+//
+// The Writer returned does not buffer writes. There is no need to Flush or
+// Close such a Writer.
+//
+// Deprecated: the Writer returned is not suitable for many small writes, only
+// for few large writes. Use NewBufferedWriter instead, which is efficient
+// regardless of the frequency and shape of the writes, and remember to Close
+// that Writer when done.
+func NewWriter(w io.Writer) *Writer {
+ return &Writer{
+ w: w,
+ obuf: make([]byte, obufLen),
+ }
+}
+
+// NewBufferedWriter returns a new Writer that compresses to w, using the
+// framing format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+//
+// The Writer returned buffers writes. Users must call Close to guarantee all
+// data has been forwarded to the underlying io.Writer. They may also call
+// Flush zero or more times before calling Close.
+func NewBufferedWriter(w io.Writer) *Writer {
+ return &Writer{
+ w: w,
+ ibuf: make([]byte, 0, maxBlockSize),
+ obuf: make([]byte, obufLen),
+ }
+}
+
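+// A minimal usage sketch (illustrative; dst and data are hypothetical):
+//
+//	w := NewBufferedWriter(dst)
+//	if _, err := w.Write(data); err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := w.Close(); err != nil { // Close implies a final Flush.
+//		log.Fatal(err)
+//	}
+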
+// Writer is an io.Writer that can write Snappy-compressed bytes.
+type Writer struct {
+ w io.Writer
+ err error
+
+ // ibuf is a buffer for the incoming (uncompressed) bytes.
+ //
+ // Its use is optional. For backwards compatibility, Writers created by the
+ // NewWriter function have ibuf == nil, do not buffer incoming bytes, and
+ // therefore do not need to be Flush'ed or Close'd.
+ ibuf []byte
+
+ // obuf is a buffer for the outgoing (compressed) bytes.
+ obuf []byte
+
+ // wroteStreamHeader is whether we have written the stream header.
+ wroteStreamHeader bool
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to
+// w. This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+ w.w = writer
+ w.err = nil
+ if w.ibuf != nil {
+ w.ibuf = w.ibuf[:0]
+ }
+ w.wroteStreamHeader = false
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (nRet int, errRet error) {
+ if w.ibuf == nil {
+ // Do not buffer incoming bytes. This does not perform or compress well
+ // if the caller of Writer.Write writes many small slices. This
+ // behavior is therefore deprecated, but still supported for backwards
+ // compatibility with code that doesn't explicitly Flush or Close.
+ return w.write(p)
+ }
+
+ // The remainder of this method is based on bufio.Writer.Write from the
+ // standard library.
+
+ for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
+ var n int
+ if len(w.ibuf) == 0 {
+ // Large write, empty buffer.
+ // Write directly from p to avoid copy.
+ n, _ = w.write(p)
+ } else {
+ n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+ w.ibuf = w.ibuf[:len(w.ibuf)+n]
+ w.Flush()
+ }
+ nRet += n
+ p = p[n:]
+ }
+ if w.err != nil {
+ return nRet, w.err
+ }
+ n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+ w.ibuf = w.ibuf[:len(w.ibuf)+n]
+ nRet += n
+ return nRet, nil
+}
+
+func (w *Writer) write(p []byte) (nRet int, errRet error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+ for len(p) > 0 {
+ obufStart := len(magicChunk)
+ if !w.wroteStreamHeader {
+ w.wroteStreamHeader = true
+ copy(w.obuf, magicChunk)
+ obufStart = 0
+ }
+
+ var uncompressed []byte
+ if len(p) > maxBlockSize {
+ uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
+ } else {
+ uncompressed, p = p, nil
+ }
+ checksum := crc(uncompressed)
+
+ // Compress the buffer, discarding the result if the improvement
+ // isn't at least 12.5%.
+ compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
+ chunkType := uint8(chunkTypeCompressedData)
+ chunkLen := 4 + len(compressed)
+ obufEnd := obufHeaderLen + len(compressed)
+ if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
+ chunkType = chunkTypeUncompressedData
+ chunkLen = 4 + len(uncompressed)
+ obufEnd = obufHeaderLen
+ }
+
+ // Fill in the per-chunk header that comes before the body.
+ w.obuf[len(magicChunk)+0] = chunkType
+ w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
+ w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
+ w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
+ w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
+ w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
+ w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
+ w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
+
+ if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
+ w.err = err
+ return nRet, err
+ }
+ if chunkType == chunkTypeUncompressedData {
+ if _, err := w.w.Write(uncompressed); err != nil {
+ w.err = err
+ return nRet, err
+ }
+ }
+ nRet += len(uncompressed)
+ }
+ return nRet, nil
+}
+
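+// To make the layout written by write concrete: each chunk is a 1-byte type,
+// a 3-byte little-endian length, then the body, whose first 4 bytes are the
+// little-endian CRC of the uncompressed data. For example, an
+// uncompressed-data chunk carrying 100 bytes has chunkLen == 104 and a
+// header of 0x01, 0x68, 0x00, 0x00 followed by the 4 checksum bytes.
+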
+// Flush flushes the Writer to its underlying io.Writer.
+func (w *Writer) Flush() error {
+ if w.err != nil {
+ return w.err
+ }
+ if len(w.ibuf) == 0 {
+ return nil
+ }
+ w.write(w.ibuf)
+ w.ibuf = w.ibuf[:0]
+ return w.err
+}
+
+// Close calls Flush and then closes the Writer.
+func (w *Writer) Close() error {
+ w.Flush()
+ ret := w.err
+ if w.err == nil {
+ w.err = errClosed
+ }
+ return ret
+}
diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go
new file mode 100644
index 0000000..150d91b
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.go
@@ -0,0 +1,29 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// emitLiteral has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitLiteral(dst, lit []byte) int
+
+// emitCopy has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitCopy(dst []byte, offset, length int) int
+
+// extendMatch has the same semantics as in encode_other.go.
+//
+//go:noescape
+func extendMatch(src []byte, i, j int) int
+
+// encodeBlock has the same semantics as in encode_other.go.
+//
+//go:noescape
+func encodeBlock(dst, src []byte) (d int)
diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s
new file mode 100644
index 0000000..adfd979
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.s
@@ -0,0 +1,730 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
+// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
+// https://github.com/golang/snappy/issues/29
+//
+// As a workaround, the package was built with a known good assembler, and
+// those instructions were disassembled by "objdump -d" to yield the
+// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+// style comments, in AT&T asm syntax. Note that rsp here is a physical
+// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
+// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
+// fine on Go 1.6.
+
+// The asm code generally follows the pure Go code in encode_other.go, except
+// where marked with a "!!!".
+
+// ----------------------------------------------------------------------------
+
+// func emitLiteral(dst, lit []byte) int
+//
+// All local variables fit into registers. The register allocation:
+// - AX len(lit)
+// - BX n
+// - DX return value
+// - DI &dst[i]
+// - R10 &lit[0]
+//
+// The 24 bytes of stack space is to call runtime·memmove.
+//
+// The unusual register allocation of local variables, such as R10 for the
+// source pointer, matches the allocation used at the call site in encodeBlock,
+// which makes it easier to manually inline this function.
+TEXT ·emitLiteral(SB), NOSPLIT, $24-56
+ MOVQ dst_base+0(FP), DI
+ MOVQ lit_base+24(FP), R10
+ MOVQ lit_len+32(FP), AX
+ MOVQ AX, DX
+ MOVL AX, BX
+ SUBL $1, BX
+
+ CMPL BX, $60
+ JLT oneByte
+ CMPL BX, $256
+ JLT twoBytes
+
+threeBytes:
+ MOVB $0xf4, 0(DI)
+ MOVW BX, 1(DI)
+ ADDQ $3, DI
+ ADDQ $3, DX
+ JMP memmove
+
+twoBytes:
+ MOVB $0xf0, 0(DI)
+ MOVB BX, 1(DI)
+ ADDQ $2, DI
+ ADDQ $2, DX
+ JMP memmove
+
+oneByte:
+ SHLB $2, BX
+ MOVB BX, 0(DI)
+ ADDQ $1, DI
+ ADDQ $1, DX
+
+memmove:
+ MOVQ DX, ret+48(FP)
+
+ // copy(dst[i:], lit)
+ //
+ // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+ // DI, R10 and AX as arguments.
+ MOVQ DI, 0(SP)
+ MOVQ R10, 8(SP)
+ MOVQ AX, 16(SP)
+ CALL runtime·memmove(SB)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func emitCopy(dst []byte, offset, length int) int
+//
+// All local variables fit into registers. The register allocation:
+// - AX length
+// - SI &dst[0]
+// - DI &dst[i]
+// - R11 offset
+//
+// The unusual register allocation of local variables, such as R11 for the
+// offset, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·emitCopy(SB), NOSPLIT, $0-48
+ MOVQ dst_base+0(FP), DI
+ MOVQ DI, SI
+ MOVQ offset+24(FP), R11
+ MOVQ length+32(FP), AX
+
+loop0:
+ // for length >= 68 { etc }
+ CMPL AX, $68
+ JLT step1
+
+ // Emit a length 64 copy, encoded as 3 bytes.
+ MOVB $0xfe, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $64, AX
+ JMP loop0
+
+step1:
+ // if length > 64 { etc }
+ CMPL AX, $64
+ JLE step2
+
+ // Emit a length 60 copy, encoded as 3 bytes.
+ MOVB $0xee, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $60, AX
+
+step2:
+ // if length >= 12 || offset >= 2048 { goto step3 }
+ CMPL AX, $12
+ JGE step3
+ CMPL R11, $2048
+ JGE step3
+
+ // Emit the remaining copy, encoded as 2 bytes.
+ MOVB R11, 1(DI)
+ SHRL $8, R11
+ SHLB $5, R11
+ SUBB $4, AX
+ SHLB $2, AX
+ ORB AX, R11
+ ORB $1, R11
+ MOVB R11, 0(DI)
+ ADDQ $2, DI
+
+ // Return the number of bytes written.
+ SUBQ SI, DI
+ MOVQ DI, ret+40(FP)
+ RET
+
+step3:
+ // Emit the remaining copy, encoded as 3 bytes.
+ SUBL $1, AX
+ SHLB $2, AX
+ ORB $2, AX
+ MOVB AX, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+
+ // Return the number of bytes written.
+ SUBQ SI, DI
+ MOVQ DI, ret+40(FP)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func extendMatch(src []byte, i, j int) int
+//
+// All local variables fit into registers. The register allocation:
+// - DX &src[0]
+// - SI &src[j]
+// - R13 &src[len(src) - 8]
+// - R14 &src[len(src)]
+// - R15 &src[i]
+//
+// The unusual register allocation of local variables, such as R15 for a source
+// pointer, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·extendMatch(SB), NOSPLIT, $0-48
+ MOVQ src_base+0(FP), DX
+ MOVQ src_len+8(FP), R14
+ MOVQ i+24(FP), R15
+ MOVQ j+32(FP), SI
+ ADDQ DX, R14
+ ADDQ DX, R15
+ ADDQ DX, SI
+ MOVQ R14, R13
+ SUBQ $8, R13
+
+cmp8:
+ // As long as we are 8 or more bytes before the end of src, we can load and
+ // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+ CMPQ SI, R13
+ JA cmp1
+ MOVQ (R15), AX
+ MOVQ (SI), BX
+ CMPQ AX, BX
+ JNE bsf
+ ADDQ $8, R15
+ ADDQ $8, SI
+ JMP cmp8
+
+bsf:
+ // If those 8 bytes were not equal, XOR the two 8 byte values, and return
+ // the index of the first byte that differs. The BSF instruction finds the
+ // least significant 1 bit, the amd64 architecture is little-endian, and
+ // the shift by 3 converts a bit index to a byte index.
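+	// For example, if the first three bytes match and the fourth differs,
+	// bits 0-23 of the XOR are zero, BSF yields a bit index in [24, 32),
+	// and the shift right by 3 maps it to byte index 3.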
+ XORQ AX, BX
+ BSFQ BX, BX
+ SHRQ $3, BX
+ ADDQ BX, SI
+
+ // Convert from &src[ret] to ret.
+ SUBQ DX, SI
+ MOVQ SI, ret+40(FP)
+ RET
+
+cmp1:
+ // In src's tail, compare 1 byte at a time.
+ CMPQ SI, R14
+ JAE extendMatchEnd
+ MOVB (R15), AX
+ MOVB (SI), BX
+ CMPB AX, BX
+ JNE extendMatchEnd
+ ADDQ $1, R15
+ ADDQ $1, SI
+ JMP cmp1
+
+extendMatchEnd:
+ // Convert from &src[ret] to ret.
+ SUBQ DX, SI
+ MOVQ SI, ret+40(FP)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func encodeBlock(dst, src []byte) (d int)
+//
+// All local variables fit into registers, other than "var table". The register
+// allocation:
+// - AX . .
+// - BX . .
+// - CX 56 shift (note that amd64 shifts by non-immediates must use CX).
+// - DX 64 &src[0], tableSize
+// - SI 72 &src[s]
+// - DI 80 &dst[d]
+// - R9 88 sLimit
+// - R10 . &src[nextEmit]
+// - R11 96 prevHash, currHash, nextHash, offset
+// - R12 104 &src[base], skip
+// - R13 . &src[nextS], &src[len(src) - 8]
+// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x
+// - R15 112 candidate
+//
+// The second column (56, 64, etc) is the stack offset to spill the registers
+// when calling other functions. We could pack this slightly tighter, but it's
+// simpler to have a dedicated spill map independent of the function called.
+//
+// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
+// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
+// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
+TEXT ·encodeBlock(SB), 0, $32888-56
+ MOVQ dst_base+0(FP), DI
+ MOVQ src_base+24(FP), SI
+ MOVQ src_len+32(FP), R14
+
+ // shift, tableSize := uint32(32-8), 1<<8
+ MOVQ $24, CX
+ MOVQ $256, DX
+
+calcShift:
+ // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+ // shift--
+ // }
+ CMPQ DX, $16384
+ JGE varTable
+ CMPQ DX, R14
+ JGE varTable
+ SUBQ $1, CX
+ SHLQ $1, DX
+ JMP calcShift
+
+varTable:
+ // var table [maxTableSize]uint16
+ //
+ // In the asm code, unlike the Go code, we can zero-initialize only the
+ // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
+ // writes 16 bytes, so we can do only tableSize/8 writes instead of the
+ // 2048 writes that would zero-initialize all of table's 32768 bytes.
+ SHRQ $3, DX
+ LEAQ table-32768(SP), BX
+ PXOR X0, X0
+
+memclr:
+ MOVOU X0, 0(BX)
+ ADDQ $16, BX
+ SUBQ $1, DX
+ JNZ memclr
+
+ // !!! DX = &src[0]
+ MOVQ SI, DX
+
+ // sLimit := len(src) - inputMargin
+ MOVQ R14, R9
+ SUBQ $15, R9
+
+ // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
+ // change for the rest of the function.
+ MOVQ CX, 56(SP)
+ MOVQ DX, 64(SP)
+ MOVQ R9, 88(SP)
+
+ // nextEmit := 0
+ MOVQ DX, R10
+
+ // s := 1
+ ADDQ $1, SI
+
+ // nextHash := hash(load32(src, s), shift)
+ MOVL 0(SI), R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+outer:
+ // for { etc }
+
+ // skip := 32
+ MOVQ $32, R12
+
+ // nextS := s
+ MOVQ SI, R13
+
+ // candidate := 0
+ MOVQ $0, R15
+
+inner0:
+ // for { etc }
+
+ // s := nextS
+ MOVQ R13, SI
+
+ // bytesBetweenHashLookups := skip >> 5
+ MOVQ R12, R14
+ SHRQ $5, R14
+
+ // nextS = s + bytesBetweenHashLookups
+ ADDQ R14, R13
+
+ // skip += bytesBetweenHashLookups
+ ADDQ R14, R12
+
+ // if nextS > sLimit { goto emitRemainder }
+ MOVQ R13, AX
+ SUBQ DX, AX
+ CMPQ AX, R9
+ JA emitRemainder
+
+ // candidate = int(table[nextHash])
+ // XXX: MOVWQZX table-32768(SP)(R11*2), R15
+ // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+ BYTE $0x4e
+ BYTE $0x0f
+ BYTE $0xb7
+ BYTE $0x7c
+ BYTE $0x5c
+ BYTE $0x78
+
+ // table[nextHash] = uint16(s)
+ MOVQ SI, AX
+ SUBQ DX, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // nextHash = hash(load32(src, nextS), shift)
+ MOVL 0(R13), R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // if load32(src, s) != load32(src, candidate) { continue } break
+ MOVL 0(SI), AX
+ MOVL (DX)(R15*1), BX
+ CMPL AX, BX
+ JNE inner0
+
+fourByteMatch:
+ // As per the encode_other.go code:
+ //
+ // A 4-byte match has been found. We'll later see etc.
+
+ // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
+ // on inputMargin in encode.go.
+ MOVQ SI, AX
+ SUBQ R10, AX
+ CMPQ AX, $16
+ JLE emitLiteralFastPath
+
+ // ----------------------------------------
+ // Begin inline of the emitLiteral call.
+ //
+ // d += emitLiteral(dst[d:], src[nextEmit:s])
+
+ MOVL AX, BX
+ SUBL $1, BX
+
+ CMPL BX, $60
+ JLT inlineEmitLiteralOneByte
+ CMPL BX, $256
+ JLT inlineEmitLiteralTwoBytes
+
+inlineEmitLiteralThreeBytes:
+ MOVB $0xf4, 0(DI)
+ MOVW BX, 1(DI)
+ ADDQ $3, DI
+ JMP inlineEmitLiteralMemmove
+
+inlineEmitLiteralTwoBytes:
+ MOVB $0xf0, 0(DI)
+ MOVB BX, 1(DI)
+ ADDQ $2, DI
+ JMP inlineEmitLiteralMemmove
+
+inlineEmitLiteralOneByte:
+ SHLB $2, BX
+ MOVB BX, 0(DI)
+ ADDQ $1, DI
+
+inlineEmitLiteralMemmove:
+ // Spill local variables (registers) onto the stack; call; unspill.
+ //
+ // copy(dst[i:], lit)
+ //
+ // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+ // DI, R10 and AX as arguments.
+ MOVQ DI, 0(SP)
+ MOVQ R10, 8(SP)
+ MOVQ AX, 16(SP)
+ ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)".
+ MOVQ SI, 72(SP)
+ MOVQ DI, 80(SP)
+ MOVQ R15, 112(SP)
+ CALL runtime·memmove(SB)
+ MOVQ 56(SP), CX
+ MOVQ 64(SP), DX
+ MOVQ 72(SP), SI
+ MOVQ 80(SP), DI
+ MOVQ 88(SP), R9
+ MOVQ 112(SP), R15
+ JMP inner1
+
+inlineEmitLiteralEnd:
+ // End inline of the emitLiteral call.
+ // ----------------------------------------
+
+emitLiteralFastPath:
+ // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
+ MOVB AX, BX
+ SUBB $1, BX
+ SHLB $2, BX
+ MOVB BX, (DI)
+ ADDQ $1, DI
+
+ // !!! Implement the copy from lit to dst as a 16-byte load and store.
+ // (Encode's documentation says that dst and src must not overlap.)
+ //
+ // This always copies 16 bytes, instead of only len(lit) bytes, but that's
+ // OK. Subsequent iterations will fix up the overrun.
+ //
+ // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+ // 16-byte loads and stores. This technique probably wouldn't be as
+ // effective on architectures that are fussier about alignment.
+ MOVOU 0(R10), X0
+ MOVOU X0, 0(DI)
+ ADDQ AX, DI
+
+inner1:
+ // for { etc }
+
+ // base := s
+ MOVQ SI, R12
+
+ // !!! offset := base - candidate
+ MOVQ R12, R11
+ SUBQ R15, R11
+ SUBQ DX, R11
+
+ // ----------------------------------------
+ // Begin inline of the extendMatch call.
+ //
+ // s = extendMatch(src, candidate+4, s+4)
+
+ // !!! R14 = &src[len(src)]
+ MOVQ src_len+32(FP), R14
+ ADDQ DX, R14
+
+ // !!! R13 = &src[len(src) - 8]
+ MOVQ R14, R13
+ SUBQ $8, R13
+
+ // !!! R15 = &src[candidate + 4]
+ ADDQ $4, R15
+ ADDQ DX, R15
+
+ // !!! s += 4
+ ADDQ $4, SI
+
+inlineExtendMatchCmp8:
+ // As long as we are 8 or more bytes before the end of src, we can load and
+ // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+ CMPQ SI, R13
+ JA inlineExtendMatchCmp1
+ MOVQ (R15), AX
+ MOVQ (SI), BX
+ CMPQ AX, BX
+ JNE inlineExtendMatchBSF
+ ADDQ $8, R15
+ ADDQ $8, SI
+ JMP inlineExtendMatchCmp8
+
+inlineExtendMatchBSF:
+ // If those 8 bytes were not equal, XOR the two 8 byte values, and return
+ // the index of the first byte that differs. The BSF instruction finds the
+ // least significant 1 bit, the amd64 architecture is little-endian, and
+ // the shift by 3 converts a bit index to a byte index.
+ XORQ AX, BX
+ BSFQ BX, BX
+ SHRQ $3, BX
+ ADDQ BX, SI
+ JMP inlineExtendMatchEnd
+
+inlineExtendMatchCmp1:
+ // In src's tail, compare 1 byte at a time.
+ CMPQ SI, R14
+ JAE inlineExtendMatchEnd
+ MOVB (R15), AX
+ MOVB (SI), BX
+ CMPB AX, BX
+ JNE inlineExtendMatchEnd
+ ADDQ $1, R15
+ ADDQ $1, SI
+ JMP inlineExtendMatchCmp1
+
+inlineExtendMatchEnd:
+ // End inline of the extendMatch call.
+ // ----------------------------------------
+
+ // ----------------------------------------
+ // Begin inline of the emitCopy call.
+ //
+ // d += emitCopy(dst[d:], base-candidate, s-base)
+
+ // !!! length := s - base
+ MOVQ SI, AX
+ SUBQ R12, AX
+
+inlineEmitCopyLoop0:
+ // for length >= 68 { etc }
+ CMPL AX, $68
+ JLT inlineEmitCopyStep1
+
+ // Emit a length 64 copy, encoded as 3 bytes.
+ MOVB $0xfe, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $64, AX
+ JMP inlineEmitCopyLoop0
+
+inlineEmitCopyStep1:
+ // if length > 64 { etc }
+ CMPL AX, $64
+ JLE inlineEmitCopyStep2
+
+ // Emit a length 60 copy, encoded as 3 bytes.
+ MOVB $0xee, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $60, AX
+
+inlineEmitCopyStep2:
+ // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
+ CMPL AX, $12
+ JGE inlineEmitCopyStep3
+ CMPL R11, $2048
+ JGE inlineEmitCopyStep3
+
+ // Emit the remaining copy, encoded as 2 bytes.
+ MOVB R11, 1(DI)
+ SHRL $8, R11
+ SHLB $5, R11
+ SUBB $4, AX
+ SHLB $2, AX
+ ORB AX, R11
+ ORB $1, R11
+ MOVB R11, 0(DI)
+ ADDQ $2, DI
+ JMP inlineEmitCopyEnd
+
+inlineEmitCopyStep3:
+ // Emit the remaining copy, encoded as 3 bytes.
+ SUBL $1, AX
+ SHLB $2, AX
+ ORB $2, AX
+ MOVB AX, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+
+inlineEmitCopyEnd:
+ // End inline of the emitCopy call.
+ // ----------------------------------------
+
+ // nextEmit = s
+ MOVQ SI, R10
+
+ // if s >= sLimit { goto emitRemainder }
+ MOVQ SI, AX
+ SUBQ DX, AX
+ CMPQ AX, R9
+ JAE emitRemainder
+
+ // As per the encode_other.go code:
+ //
+ // We could immediately etc.
+
+ // x := load64(src, s-1)
+ MOVQ -1(SI), R14
+
+ // prevHash := hash(uint32(x>>0), shift)
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // table[prevHash] = uint16(s-1)
+ MOVQ SI, AX
+ SUBQ DX, AX
+ SUBQ $1, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // currHash := hash(uint32(x>>8), shift)
+ SHRQ $8, R14
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // candidate = int(table[currHash])
+ // XXX: MOVWQZX table-32768(SP)(R11*2), R15
+ // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+ BYTE $0x4e
+ BYTE $0x0f
+ BYTE $0xb7
+ BYTE $0x7c
+ BYTE $0x5c
+ BYTE $0x78
+
+ // table[currHash] = uint16(s)
+ ADDQ $1, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // if uint32(x>>8) == load32(src, candidate) { continue }
+ MOVL (DX)(R15*1), BX
+ CMPL R14, BX
+ JEQ inner1
+
+ // nextHash = hash(uint32(x>>16), shift)
+ SHRQ $8, R14
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // s++
+ ADDQ $1, SI
+
+ // break out of the inner1 for loop, i.e. continue the outer loop.
+ JMP outer
+
+emitRemainder:
+ // if nextEmit < len(src) { etc }
+ MOVQ src_len+32(FP), AX
+ ADDQ DX, AX
+ CMPQ R10, AX
+ JEQ encodeBlockEnd
+
+ // d += emitLiteral(dst[d:], src[nextEmit:])
+ //
+ // Push args.
+ MOVQ DI, 0(SP)
+ MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative.
+ MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
+ MOVQ R10, 24(SP)
+ SUBQ R10, AX
+ MOVQ AX, 32(SP)
+ MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative.
+
+ // Spill local variables (registers) onto the stack; call; unspill.
+ MOVQ DI, 80(SP)
+ CALL ·emitLiteral(SB)
+ MOVQ 80(SP), DI
+
+ // Finish the "d +=" part of "d += emitLiteral(etc)".
+ ADDQ 48(SP), DI
+
+encodeBlockEnd:
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, DI
+ MOVQ DI, d+48(FP)
+ RET
diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go
new file mode 100644
index 0000000..dbcae90
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_other.go
@@ -0,0 +1,238 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+func load32(b []byte, i int) uint32 {
+ b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+ b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+// dst is long enough to hold the encoded bytes
+// 1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+ i, n := 0, uint(len(lit)-1)
+ switch {
+ case n < 60:
+ dst[0] = uint8(n)<<2 | tagLiteral
+ i = 1
+ case n < 1<<8:
+ dst[0] = 60<<2 | tagLiteral
+ dst[1] = uint8(n)
+ i = 2
+ default:
+ dst[0] = 61<<2 | tagLiteral
+ dst[1] = uint8(n)
+ dst[2] = uint8(n >> 8)
+ i = 3
+ }
+ return i + copy(dst[i:], lit)
+}
+
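+// For example, a 5-byte literal has n == 4 < 60, so the tag byte is
+// 4<<2|tagLiteral == 0x10 and six bytes are written in total:
+//
+//	n := emitLiteral(dst, []byte("hello")) // dst[0] == 0x10, n == 6
+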
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+// dst is long enough to hold the encoded bytes
+// 1 <= offset && offset <= 65535
+// 4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+ i := 0
+ // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+ // threshold for this loop is a little higher (at 68 = 64 + 4), and the
+	// length emitted down below is a little lower (at 60 = 64 - 4), because
+ // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+ // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+ // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+ // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+ // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+ // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+ for length >= 68 {
+ // Emit a length 64 copy, encoded as 3 bytes.
+ dst[i+0] = 63<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ i += 3
+ length -= 64
+ }
+ if length > 64 {
+ // Emit a length 60 copy, encoded as 3 bytes.
+ dst[i+0] = 59<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ i += 3
+ length -= 60
+ }
+ if length >= 12 || offset >= 2048 {
+ // Emit the remaining copy, encoded as 3 bytes.
+ dst[i+0] = uint8(length-1)<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ return i + 3
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+ dst[i+1] = uint8(offset)
+ return i + 2
+}
+
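+// For example, emitCopy(dst, 8, 10) satisfies length < 12 and offset < 2048,
+// so it emits a single 2-byte tagCopy1 op:
+//
+//	dst[0] == uint8(8>>8)<<5 | uint8(10-4)<<2 | tagCopy1 // == 0x19
+//	dst[1] == uint8(8)                                   // low 8 offset bits
+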
+// extendMatch returns the largest k such that k <= len(src) and that
+// src[i:i+k-j] and src[j:k] have the same contents.
+//
+// It assumes that:
+// 0 <= i && i < j && j <= len(src)
+func extendMatch(src []byte, i, j int) int {
+ for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
+ }
+ return j
+}
+
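+// For example, extendMatch([]byte("aaaab"), 0, 1) compares src[0:] against
+// src[1:] and stops at the 'b', returning 4: the match extends three bytes
+// past j's starting position.
+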
+func hash(u, shift uint32) uint32 {
+ return (u * 0x1e35a7bd) >> shift
+}
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlock(dst, src []byte) (d int) {
+ // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+ // The table element type is uint16, as s < sLimit and sLimit < len(src)
+ // and len(src) <= maxBlockSize and maxBlockSize == 65536.
+ const (
+ maxTableSize = 1 << 14
+ // tableMask is redundant, but helps the compiler eliminate bounds
+ // checks.
+ tableMask = maxTableSize - 1
+ )
+ shift := uint32(32 - 8)
+ for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+ shift--
+ }
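+	// For example, len(src) == 4096 doubles tableSize four times (256, 512,
+	// 1024, 2048, then 4096) and leaves shift == 20, so hash values come
+	// from the top 12 bits of the 32-bit product and index a 4096-entry
+	// table.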
+ // In Go, all array elements are zero-initialized, so there is no advantage
+ // to a smaller tableSize per se. However, it matches the C++ algorithm,
+ // and in the asm versions of this code, we can get away with zeroing only
+ // the first tableSize elements.
+ var table [maxTableSize]uint16
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ nextHash := hash(load32(src, s), shift)
+
+ for {
+ // Copied from the C++ snappy implementation:
+ //
+ // Heuristic match skipping: If 32 bytes are scanned with no matches
+ // found, start looking only at every other byte. If 32 more bytes are
+ // scanned (or skipped), look at every third byte, etc.. When a match
+ // is found, immediately go back to looking at every byte. This is a
+ // small loss (~5% performance, ~0.1% density) for compressible data
+ // due to more bookkeeping, but for non-compressible data (such as
+ // JPEG) it's a huge win since the compressor quickly "realizes" the
+ // data is incompressible and doesn't bother looking for matches
+ // everywhere.
+ //
+ // The "skip" variable keeps track of how many bytes there are since
+ // the last match; dividing it by 32 (ie. right-shifting by five) gives
+ // the number of bytes to move ahead for each iteration.
+ skip := 32
+
+ nextS := s
+ candidate := 0
+ for {
+ s = nextS
+ bytesBetweenHashLookups := skip >> 5
+ nextS = s + bytesBetweenHashLookups
+ skip += bytesBetweenHashLookups
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ candidate = int(table[nextHash&tableMask])
+ table[nextHash&tableMask] = uint16(s)
+ nextHash = hash(load32(src, nextS), shift)
+ if load32(src, s) == load32(src, candidate) {
+ break
+ }
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+ d += emitLiteral(dst[d:], src[nextEmit:s])
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+
+ // Extend the 4-byte match as long as possible.
+ //
+ // This is an inlined version of:
+ // s = extendMatch(src, candidate+4, s+4)
+ s += 4
+ for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
+ }
+
+ d += emitCopy(dst[d:], base-candidate, s-base)
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load64(src, s-1)
+ prevHash := hash(uint32(x>>0), shift)
+ table[prevHash&tableMask] = uint16(s - 1)
+ currHash := hash(uint32(x>>8), shift)
+ candidate = int(table[currHash&tableMask])
+ table[currHash&tableMask] = uint16(s)
+ if uint32(x>>8) != load32(src, candidate) {
+ nextHash = hash(uint32(x>>16), shift)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ }
+ return d
+}
diff --git a/vendor/github.com/golang/snappy/go.mod b/vendor/github.com/golang/snappy/go.mod
new file mode 100644
index 0000000..f6406bb
--- /dev/null
+++ b/vendor/github.com/golang/snappy/go.mod
@@ -0,0 +1 @@
+module github.com/golang/snappy
diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go
new file mode 100644
index 0000000..ece692e
--- /dev/null
+++ b/vendor/github.com/golang/snappy/snappy.go
@@ -0,0 +1,98 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package snappy implements the Snappy compression format. It aims for very
+// high speeds and reasonable compression.
+//
+// There are actually two Snappy formats: block and stream. They are related,
+// but different: trying to decompress block-compressed data as a Snappy stream
+// will fail, and vice versa. The block format is the Decode and Encode
+// functions and the stream format is the Reader and Writer types.
+//
+// The block format, the more common case, is used when the complete size (the
+// number of bytes) of the original data is known upfront, at the time
+// compression starts. The stream format, also known as the framing format, is
+// for when that isn't always true.
+//
+// The canonical, C++ implementation is at https://github.com/google/snappy and
+// it only implements the block format.
+package snappy // import "github.com/golang/snappy"
+
+import (
+ "hash/crc32"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+ - If m < 60, the next 1 + m bytes are literal bytes.
+ - Otherwise, let n be the little-endian unsigned integer denoted by the next
+ m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+ - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+ The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+ of the offset. The next byte is bits 0-7 of the offset.
+ - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+ The length is 1 + m. The offset is the little-endian unsigned integer
+ denoted by the next 2 bytes.
+ - For l == 3, this tag is a legacy format that is no longer issued by most
+ encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
+ [1, 65). The length is 1 + m. The offset is the little-endian unsigned
+ integer denoted by the next 4 bytes.
+*/
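+
+// Worked example: tag byte 0x10 has l == 0 and m == 4, so it starts a 5-byte
+// literal. Tag byte 0x19 has l == 1 and m == 6: a copy of length 4+6 == 10,
+// with offset bits 8-10 taken from the high 3 bits of m (here 0) and offset
+// bits 0-7 from the next byte.
+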
+const (
+ tagLiteral = 0x00
+ tagCopy1 = 0x01
+ tagCopy2 = 0x02
+ tagCopy4 = 0x03
+)
+
+const (
+ checksumSize = 4
+ chunkHeaderSize = 4
+ magicChunk = "\xff\x06\x00\x00" + magicBody
+ magicBody = "sNaPpY"
+
+ // maxBlockSize is the maximum size of the input to encodeBlock. It is not
+ // part of the wire format per se, but some parts of the encoder assume
+ // that an offset fits into a uint16.
+ //
+ // Also, for the framing format (Writer type instead of Encode function),
+ // https://github.com/google/snappy/blob/master/framing_format.txt says
+ // that "the uncompressed data in a chunk must be no longer than 65536
+ // bytes".
+ maxBlockSize = 65536
+
+ // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
+ // hard coded to be a const instead of a variable, so that obufLen can also
+ // be a const. Their equivalence is confirmed by
+ // TestMaxEncodedLenOfMaxBlockSize.
+ maxEncodedLenOfMaxBlockSize = 76490
+
+ obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
+ obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize
+)
+
+const (
+ chunkTypeCompressedData = 0x00
+ chunkTypeUncompressedData = 0x01
+ chunkTypePadding = 0xfe
+ chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func crc(b []byte) uint32 {
+ c := crc32.Update(0, crcTable, b)
+ return uint32(c>>15|c<<17) + 0xa282ead8
+}
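
The framing helpers above and the wire-format comment earlier in the file are easiest to sanity-check through the package's block-format API. A minimal round-trip sketch, assuming the vendored package is imported as `github.com/golang/snappy`:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	src := bytes.Repeat([]byte("snappy favors speed over ratio; "), 8)

	// Block format: one varint-encoded decoded length followed by
	// literal/copy chunks, as described in the format comment above.
	encoded := snappy.Encode(nil, src)

	decoded, err := snappy.Decode(nil, encoded)
	if err != nil {
		panic(err)
	}
	fmt.Printf("original %d bytes, encoded %d bytes, round-trip ok: %v\n",
		len(src), len(encoded), bytes.Equal(decoded, src))
}
```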
diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml
new file mode 100644
index 0000000..d8156a6
--- /dev/null
+++ b/vendor/github.com/google/uuid/.travis.yml
@@ -0,0 +1,9 @@
+language: go
+
+go:
+ - 1.4.3
+ - 1.5.3
+ - tip
+
+script:
+ - go test -v ./...
diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md
new file mode 100644
index 0000000..04fdf09
--- /dev/null
+++ b/vendor/github.com/google/uuid/CONTRIBUTING.md
@@ -0,0 +1,10 @@
+# How to contribute
+
+We definitely welcome patches and contribution to this project!
+
+### Legal requirements
+
+In order to protect both you and ourselves, you will need to sign the
+[Contributor License Agreement](https://cla.developers.google.com/clas).
+
+You may have already signed it for other Google projects.
diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS
new file mode 100644
index 0000000..b4bb97f
--- /dev/null
+++ b/vendor/github.com/google/uuid/CONTRIBUTORS
@@ -0,0 +1,9 @@
+Paul Borman
+bmatsuo
+shawnps
+theory
+jboverfelt
+dsymonds
+cd1
+wallclockbuilder
+dansouza
diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE
new file mode 100644
index 0000000..5dc6826
--- /dev/null
+++ b/vendor/github.com/google/uuid/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009,2014 Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md
new file mode 100644
index 0000000..9d92c11
--- /dev/null
+++ b/vendor/github.com/google/uuid/README.md
@@ -0,0 +1,19 @@
+# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master)
+The uuid package generates and inspects UUIDs based on
+[RFC 4122](http://tools.ietf.org/html/rfc4122)
+and DCE 1.1: Authentication and Security Services.
+
+This package is based on the github.com/pborman/uuid package (previously named
+code.google.com/p/go-uuid). It differs from these earlier packages in that
+a UUID is a 16 byte array rather than a byte slice. One loss due to this
+change is the ability to represent an invalid UUID (vs a NIL UUID).
+
+###### Install
+`go get github.com/google/uuid`
+
+###### Documentation
+[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid)
+
+Full `go doc` style documentation for the package can be viewed online without
+installing this package by using the GoDoc site here:
+http://godoc.org/github.com/google/uuid
diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go
new file mode 100644
index 0000000..fa820b9
--- /dev/null
+++ b/vendor/github.com/google/uuid/dce.go
@@ -0,0 +1,80 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+ "fmt"
+ "os"
+)
+
+// A Domain represents a Version 2 domain
+type Domain byte
+
+// Domain constants for DCE Security (Version 2) UUIDs.
+const (
+ Person = Domain(0)
+ Group = Domain(1)
+ Org = Domain(2)
+)
+
+// NewDCESecurity returns a DCE Security (Version 2) UUID.
+//
+// The domain should be one of Person, Group or Org.
+// On a POSIX system the id should be the user's UID for the Person
+// domain and the user's GID for the Group domain. The meaning of id for
+// the domain Org or on non-POSIX systems is site defined.
+//
+// For a given domain/id pair the same token may be returned for up to
+// 7 minutes and 10 seconds.
+func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
+ uuid, err := NewUUID()
+ if err == nil {
+ uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
+ uuid[9] = byte(domain)
+ binary.BigEndian.PutUint32(uuid[0:], id)
+ }
+ return uuid, err
+}
+
+// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
+// domain with the id returned by os.Getuid.
+//
+// NewDCESecurity(Person, uint32(os.Getuid()))
+func NewDCEPerson() (UUID, error) {
+ return NewDCESecurity(Person, uint32(os.Getuid()))
+}
+
+// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
+// domain with the id returned by os.Getgid.
+//
+// NewDCESecurity(Group, uint32(os.Getgid()))
+func NewDCEGroup() (UUID, error) {
+ return NewDCESecurity(Group, uint32(os.Getgid()))
+}
+
+// Domain returns the domain for a Version 2 UUID. Domains are only defined
+// for Version 2 UUIDs.
+func (uuid UUID) Domain() Domain {
+ return Domain(uuid[9])
+}
+
+// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
+// UUIDs.
+func (uuid UUID) ID() uint32 {
+ return binary.BigEndian.Uint32(uuid[0:4])
+}
+
+func (d Domain) String() string {
+ switch d {
+ case Person:
+ return "Person"
+ case Group:
+ return "Group"
+ case Org:
+ return "Org"
+ }
+ return fmt.Sprintf("Domain%d", int(d))
+}
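
A short usage sketch for the DCE Security helpers above; Domain and ID simply read the values back out of bytes 9 and 0-3 of the generated UUID:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Equivalent to uuid.NewDCESecurity(uuid.Person, uint32(os.Getuid())).
	id, err := uuid.NewDCEPerson()
	if err != nil {
		panic(err)
	}
	fmt.Println(id)          // a Version 2 UUID
	fmt.Println(id.Domain()) // "Person"
	fmt.Println(id.ID())     // the caller's UID
}
```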
diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go
new file mode 100644
index 0000000..5b8a4b9
--- /dev/null
+++ b/vendor/github.com/google/uuid/doc.go
@@ -0,0 +1,12 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package uuid generates and inspects UUIDs.
+//
+// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
+// Services.
+//
+// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to
+// maps or compared directly.
+package uuid
diff --git a/vendor/github.com/google/uuid/go.mod b/vendor/github.com/google/uuid/go.mod
new file mode 100644
index 0000000..fc84cd7
--- /dev/null
+++ b/vendor/github.com/google/uuid/go.mod
@@ -0,0 +1 @@
+module github.com/google/uuid
diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go
new file mode 100644
index 0000000..b174616
--- /dev/null
+++ b/vendor/github.com/google/uuid/hash.go
@@ -0,0 +1,53 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "crypto/md5"
+ "crypto/sha1"
+ "hash"
+)
+
+// Well known namespace IDs and UUIDs
+var (
+ NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
+ NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
+ NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
+ NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
+ Nil UUID // empty UUID, all zeros
+)
+
+// NewHash returns a new UUID derived from the hash of space concatenated with
+// data generated by h. The hash should be at least 16 bytes in length. The
+// first 16 bytes of the hash are used to form the UUID. The version of the
+// UUID will be the lower 4 bits of version. NewHash is used to implement
+// NewMD5 and NewSHA1.
+func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
+ h.Reset()
+ h.Write(space[:])
+ h.Write(data)
+ s := h.Sum(nil)
+ var uuid UUID
+ copy(uuid[:], s)
+ uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
+ uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
+ return uuid
+}
+
+// NewMD5 returns a new MD5 (Version 3) UUID based on the
+// supplied name space and data. It is the same as calling:
+//
+// NewHash(md5.New(), space, data, 3)
+func NewMD5(space UUID, data []byte) UUID {
+ return NewHash(md5.New(), space, data, 3)
+}
+
+// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
+// supplied name space and data. It is the same as calling:
+//
+// NewHash(sha1.New(), space, data, 5)
+func NewSHA1(space UUID, data []byte) UUID {
+ return NewHash(sha1.New(), space, data, 5)
+}
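
Name-based UUIDs are deterministic: hashing the same namespace and name always yields the same UUID. A minimal sketch using the well-known DNS namespace defined above:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	a := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.com"))
	b := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.com"))
	fmt.Println(a == b) // true; UUIDs are comparable 16 byte arrays

	// NewMD5 produces Version 3, NewSHA1 produces Version 5.
	fmt.Println(uuid.NewMD5(uuid.NameSpaceDNS, []byte("example.com")).Version()) // VERSION_3
	fmt.Println(a.Version())                                                     // VERSION_5
}
```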
diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go
new file mode 100644
index 0000000..7f9e0c6
--- /dev/null
+++ b/vendor/github.com/google/uuid/marshal.go
@@ -0,0 +1,37 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "fmt"
+
+// MarshalText implements encoding.TextMarshaler.
+func (uuid UUID) MarshalText() ([]byte, error) {
+ var js [36]byte
+ encodeHex(js[:], uuid)
+ return js[:], nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (uuid *UUID) UnmarshalText(data []byte) error {
+ id, err := ParseBytes(data)
+ if err == nil {
+ *uuid = id
+ }
+ return err
+}
+
+// MarshalBinary implements encoding.BinaryMarshaler.
+func (uuid UUID) MarshalBinary() ([]byte, error) {
+ return uuid[:], nil
+}
+
+// UnmarshalBinary implements encoding.BinaryUnmarshaler.
+func (uuid *UUID) UnmarshalBinary(data []byte) error {
+ if len(data) != 16 {
+ return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
+ }
+ copy(uuid[:], data)
+ return nil
+}
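
Because UUID implements encoding.TextMarshaler and TextUnmarshaler, encoding/json transparently serializes it as the canonical 36 character string. A minimal sketch:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/uuid"
)

type record struct {
	ID uuid.UUID `json:"id"`
}

func main() {
	r := record{ID: uuid.New()}

	out, err := json.Marshal(r) // {"id":"xxxxxxxx-xxxx-..."}
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))

	var back record
	if err := json.Unmarshal(out, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.ID == r.ID) // true
}
```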
diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go
new file mode 100644
index 0000000..d651a2b
--- /dev/null
+++ b/vendor/github.com/google/uuid/node.go
@@ -0,0 +1,90 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "sync"
+)
+
+var (
+ nodeMu sync.Mutex
+ ifname string // name of interface being used
+ nodeID [6]byte // hardware for version 1 UUIDs
+ zeroID [6]byte // nodeID with only 0's
+)
+
+// NodeInterface returns the name of the interface from which the NodeID was
+// derived. The interface "user" is returned if the NodeID was set by
+// SetNodeID.
+func NodeInterface() string {
+ defer nodeMu.Unlock()
+ nodeMu.Lock()
+ return ifname
+}
+
+// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
+// If name is "" then the first usable interface found will be used or a random
+// Node ID will be generated. If a named interface cannot be found then false
+// is returned.
+//
+// SetNodeInterface never fails when name is "".
+func SetNodeInterface(name string) bool {
+ defer nodeMu.Unlock()
+ nodeMu.Lock()
+ return setNodeInterface(name)
+}
+
+func setNodeInterface(name string) bool {
+ iname, addr := getHardwareInterface(name) // null implementation for js
+ if iname != "" && addr != nil {
+ ifname = iname
+ copy(nodeID[:], addr)
+ return true
+ }
+
+ // We found no interfaces with a valid hardware address. If name
+ // does not specify a specific interface, generate a random Node ID
+ // (section 4.1.6)
+ if name == "" {
+ ifname = "random"
+ randomBits(nodeID[:])
+ return true
+ }
+ return false
+}
+
+// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
+// if not already set.
+func NodeID() []byte {
+ defer nodeMu.Unlock()
+ nodeMu.Lock()
+ if nodeID == zeroID {
+ setNodeInterface("")
+ }
+ nid := nodeID
+ return nid[:]
+}
+
+// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
+// of id are used. If id is less than 6 bytes then false is returned and the
+// Node ID is not set.
+func SetNodeID(id []byte) bool {
+ if len(id) < 6 {
+ return false
+ }
+ defer nodeMu.Unlock()
+ nodeMu.Lock()
+ copy(nodeID[:], id)
+ ifname = "user"
+ return true
+}
+
+// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
+// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
+func (uuid UUID) NodeID() []byte {
+ var node [6]byte
+ copy(node[:], uuid[10:])
+ return node[:]
+}
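
A sketch of pinning the node ID instead of deriving it from a network interface; the 6 byte value here is an arbitrary, locally administered MAC-style address chosen for illustration:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Only the first 6 bytes of the argument are used.
	ok := uuid.SetNodeID([]byte{0x02, 0x00, 0x5e, 0x10, 0x00, 0x01})
	fmt.Println(ok, uuid.NodeInterface()) // true user

	id, err := uuid.NewUUID() // Version 1; the node ID lands in bytes 10-15
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", id.NodeID()) // 02 00 5e 10 00 01
}
```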
diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go
new file mode 100644
index 0000000..24b78ed
--- /dev/null
+++ b/vendor/github.com/google/uuid/node_js.go
@@ -0,0 +1,12 @@
+// Copyright 2017 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build js
+
+package uuid
+
+// getHardwareInterface returns nil values for the JS version of the code.
+// This removes the "net" dependency, because it is not used in the browser.
+// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
+func getHardwareInterface(name string) (string, []byte) { return "", nil }
diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go
new file mode 100644
index 0000000..0cbbcdd
--- /dev/null
+++ b/vendor/github.com/google/uuid/node_net.go
@@ -0,0 +1,33 @@
+// Copyright 2017 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !js
+
+package uuid
+
+import "net"
+
+var interfaces []net.Interface // cached list of interfaces
+
+// getHardwareInterface returns the name and hardware address of interface name.
+// If name is "" then the name and hardware address of one of the system's
+// interfaces is returned. If no interfaces are found (name does not exist or
+// there are no interfaces) then "", nil is returned.
+//
+// Only addresses of at least 6 bytes are returned.
+func getHardwareInterface(name string) (string, []byte) {
+ if interfaces == nil {
+ var err error
+ interfaces, err = net.Interfaces()
+ if err != nil {
+ return "", nil
+ }
+ }
+ for _, ifs := range interfaces {
+ if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
+ return ifs.Name, ifs.HardwareAddr
+ }
+ }
+ return "", nil
+}
diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go
new file mode 100644
index 0000000..f326b54
--- /dev/null
+++ b/vendor/github.com/google/uuid/sql.go
@@ -0,0 +1,59 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "database/sql/driver"
+ "fmt"
+)
+
+// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
+// Currently, database types that map to string and []byte are supported. Please
+// consult database-specific driver documentation for matching types.
+func (uuid *UUID) Scan(src interface{}) error {
+ switch src := src.(type) {
+ case nil:
+ return nil
+
+ case string:
+ // if an empty UUID comes from a table, we return a null UUID
+ if src == "" {
+ return nil
+ }
+
+ // see Parse for required string format
+ u, err := Parse(src)
+ if err != nil {
+ return fmt.Errorf("Scan: %v", err)
+ }
+
+ *uuid = u
+
+ case []byte:
+ // if an empty UUID comes from a table, we return a null UUID
+ if len(src) == 0 {
+ return nil
+ }
+
+ // if the slice is exactly 16 bytes, treat it as raw UUID bytes;
+ // otherwise attempt to parse it as a string
+ if len(src) != 16 {
+ return uuid.Scan(string(src))
+ }
+ copy((*uuid)[:], src)
+
+ default:
+ return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
+ }
+
+ return nil
+}
+
+// Value implements driver.Valuer so that UUIDs can be written to databases
+// transparently. Currently, UUIDs map to strings. Please consult
+// database-specific driver documentation for matching types.
+func (uuid UUID) Value() (driver.Value, error) {
+ return uuid.String(), nil
+}
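
Scan and Value can be exercised without a live database; a minimal sketch of the round trip database/sql performs on the program's behalf:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	id := uuid.New()

	// Value is what database/sql hands to the driver: the canonical string.
	v, err := id.Value()
	if err != nil {
		panic(err)
	}
	fmt.Println(v)

	// Scan accepts strings, raw 16 byte slices, and string-shaped []byte.
	var out uuid.UUID
	if err := out.Scan(v); err != nil {
		panic(err)
	}
	fmt.Println(out == id) // true
}
```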
diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go
new file mode 100644
index 0000000..e6ef06c
--- /dev/null
+++ b/vendor/github.com/google/uuid/time.go
@@ -0,0 +1,123 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+ "sync"
+ "time"
+)
+
+// A Time represents a time as the number of 100s of nanoseconds since 15 Oct
+// 1582.
+type Time int64
+
+const (
+ lillian = 2299160 // Julian day of 15 Oct 1582
+ unix = 2440587 // Julian day of 1 Jan 1970
+ epoch = unix - lillian // Days between epochs
+ g1582 = epoch * 86400 // seconds between epochs
+ g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
+)
+
+var (
+ timeMu sync.Mutex
+ lasttime uint64 // last time we returned
+ clockSeq uint16 // clock sequence for this run
+
+ timeNow = time.Now // for testing
+)
+
+// UnixTime converts t to the number of seconds and nanoseconds since the Unix
+// epoch of 1 Jan 1970.
+func (t Time) UnixTime() (sec, nsec int64) {
+ sec = int64(t - g1582ns100)
+ nsec = (sec % 10000000) * 100
+ sec /= 10000000
+ return sec, nsec
+}
+
+// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
+// clock sequence as well as adjusting the clock sequence as needed. An error
+// is returned if the current time cannot be determined.
+func GetTime() (Time, uint16, error) {
+ defer timeMu.Unlock()
+ timeMu.Lock()
+ return getTime()
+}
+
+func getTime() (Time, uint16, error) {
+ t := timeNow()
+
+ // If we don't have a clock sequence already, set one.
+ if clockSeq == 0 {
+ setClockSequence(-1)
+ }
+ now := uint64(t.UnixNano()/100) + g1582ns100
+
+ // If time has gone backwards with this clock sequence then we
+ // increment the clock sequence
+ if now <= lasttime {
+ clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000
+ }
+ lasttime = now
+ return Time(now), clockSeq, nil
+}
+
+// ClockSequence returns the current clock sequence, generating one if not
+// already set. The clock sequence is only used for Version 1 UUIDs.
+//
+// The uuid package does not use global static storage for the clock sequence or
+// the last time a UUID was generated. Unless SetClockSequence is used, a new
+// random clock sequence is generated the first time a clock sequence is
+// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
+func ClockSequence() int {
+ defer timeMu.Unlock()
+ timeMu.Lock()
+ return clockSequence()
+}
+
+func clockSequence() int {
+ if clockSeq == 0 {
+ setClockSequence(-1)
+ }
+ return int(clockSeq & 0x3fff)
+}
+
+// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
+// -1 causes a new sequence to be generated.
+func SetClockSequence(seq int) {
+ defer timeMu.Unlock()
+ timeMu.Lock()
+ setClockSequence(seq)
+}
+
+func setClockSequence(seq int) {
+ if seq == -1 {
+ var b [2]byte
+ randomBits(b[:]) // clock sequence
+ seq = int(b[0])<<8 | int(b[1])
+ }
+ oldSeq := clockSeq
+ clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
+ if oldSeq != clockSeq {
+ lasttime = 0
+ }
+}
+
+// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
+// uuid. The time is only defined for version 1 and 2 UUIDs.
+func (uuid UUID) Time() Time {
+ time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+ time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+ time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+ return Time(time)
+}
+
+// ClockSequence returns the clock sequence encoded in uuid.
+// The clock sequence is only well defined for version 1 and 2 UUIDs.
+func (uuid UUID) ClockSequence() int {
+ return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff
+}
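
A sketch that recovers the embedded timestamp and clock sequence from a freshly generated Version 1 UUID using the accessors above:

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/uuid"
)

func main() {
	id, err := uuid.NewUUID() // Version 1: timestamp + clock sequence + node
	if err != nil {
		panic(err)
	}

	// Time reassembles the 60 bit timestamp from bytes 0-7; UnixTime
	// converts it from the 1582 epoch back to the Unix epoch.
	sec, nsec := id.Time().UnixTime()
	fmt.Println(time.Unix(sec, nsec).UTC())
	fmt.Println(id.ClockSequence())
}
```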
diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go
new file mode 100644
index 0000000..5ea6c73
--- /dev/null
+++ b/vendor/github.com/google/uuid/util.go
@@ -0,0 +1,43 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "io"
+)
+
+// randomBits completely fills slice b with random data.
+func randomBits(b []byte) {
+ if _, err := io.ReadFull(rander, b); err != nil {
+ panic(err.Error()) // rand should never fail
+ }
+}
+
+// xvalues maps a byte to its value as a hexadecimal digit, or to 255 if it is
+// not a hex digit.
+var xvalues = [256]byte{
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
+ 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+}
+
+// xtob converts hex characters x1 and x2 into a byte.
+func xtob(x1, x2 byte) (byte, bool) {
+ b1 := xvalues[x1]
+ b2 := xvalues[x2]
+ return (b1 << 4) | b2, b1 != 255 && b2 != 255
+}
diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go
new file mode 100644
index 0000000..524404c
--- /dev/null
+++ b/vendor/github.com/google/uuid/uuid.go
@@ -0,0 +1,245 @@
+// Copyright 2018 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
+// 4122.
+type UUID [16]byte
+
+// A Version represents a UUID's version.
+type Version byte
+
+// A Variant represents a UUID's variant.
+type Variant byte
+
+// Constants returned by Variant.
+const (
+ Invalid = Variant(iota) // Invalid UUID
+ RFC4122 // The variant specified in RFC4122
+ Reserved // Reserved, NCS backward compatibility.
+ Microsoft // Reserved, Microsoft Corporation backward compatibility.
+ Future // Reserved for future definition.
+)
+
+var rander = rand.Reader // random function
+
+// Parse decodes s into a UUID or returns an error. Both the standard UUID
+// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the
+// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex
+// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.
+func Parse(s string) (UUID, error) {
+ var uuid UUID
+ switch len(s) {
+ // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ case 36:
+
+ // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ case 36 + 9:
+ if strings.ToLower(s[:9]) != "urn:uuid:" {
+ return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
+ }
+ s = s[9:]
+
+ // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+ case 36 + 2:
+ s = s[1:]
+
+ // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ case 32:
+ var ok bool
+ for i := range uuid {
+ uuid[i], ok = xtob(s[i*2], s[i*2+1])
+ if !ok {
+ return uuid, errors.New("invalid UUID format")
+ }
+ }
+ return uuid, nil
+ default:
+ return uuid, fmt.Errorf("invalid UUID length: %d", len(s))
+ }
+ // s is now at least 36 bytes long
+ // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+ return uuid, errors.New("invalid UUID format")
+ }
+ for i, x := range [16]int{
+ 0, 2, 4, 6,
+ 9, 11,
+ 14, 16,
+ 19, 21,
+ 24, 26, 28, 30, 32, 34} {
+ v, ok := xtob(s[x], s[x+1])
+ if !ok {
+ return uuid, errors.New("invalid UUID format")
+ }
+ uuid[i] = v
+ }
+ return uuid, nil
+}
+
+// ParseBytes is like Parse, except it parses a byte slice instead of a string.
+func ParseBytes(b []byte) (UUID, error) {
+ var uuid UUID
+ switch len(b) {
+ case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
+ return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
+ }
+ b = b[9:]
+ case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+ b = b[1:]
+ case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ var ok bool
+ for i := 0; i < 32; i += 2 {
+ uuid[i/2], ok = xtob(b[i], b[i+1])
+ if !ok {
+ return uuid, errors.New("invalid UUID format")
+ }
+ }
+ return uuid, nil
+ default:
+ return uuid, fmt.Errorf("invalid UUID length: %d", len(b))
+ }
+ // b is now at least 36 bytes long
+ // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
+ return uuid, errors.New("invalid UUID format")
+ }
+ for i, x := range [16]int{
+ 0, 2, 4, 6,
+ 9, 11,
+ 14, 16,
+ 19, 21,
+ 24, 26, 28, 30, 32, 34} {
+ v, ok := xtob(b[x], b[x+1])
+ if !ok {
+ return uuid, errors.New("invalid UUID format")
+ }
+ uuid[i] = v
+ }
+ return uuid, nil
+}
+
+// MustParse is like Parse but panics if the string cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled UUIDs.
+func MustParse(s string) UUID {
+ uuid, err := Parse(s)
+ if err != nil {
+ panic(`uuid: Parse(` + s + `): ` + err.Error())
+ }
+ return uuid
+}
+
+// FromBytes creates a new UUID from a byte slice. Returns an error if the slice
+// does not have a length of 16. The bytes are copied from the slice.
+func FromBytes(b []byte) (uuid UUID, err error) {
+ err = uuid.UnmarshalBinary(b)
+ return uuid, err
+}
+
+// Must returns uuid if err is nil and panics otherwise.
+func Must(uuid UUID, err error) UUID {
+ if err != nil {
+ panic(err)
+ }
+ return uuid
+}
+
+// String returns the string form of uuid,
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
+func (uuid UUID) String() string {
+ var buf [36]byte
+ encodeHex(buf[:], uuid)
+ return string(buf[:])
+}
+
+// URN returns the RFC 2141 URN form of uuid,
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
+func (uuid UUID) URN() string {
+ var buf [36 + 9]byte
+ copy(buf[:], "urn:uuid:")
+ encodeHex(buf[9:], uuid)
+ return string(buf[:])
+}
+
+func encodeHex(dst []byte, uuid UUID) {
+ hex.Encode(dst, uuid[:4])
+ dst[8] = '-'
+ hex.Encode(dst[9:13], uuid[4:6])
+ dst[13] = '-'
+ hex.Encode(dst[14:18], uuid[6:8])
+ dst[18] = '-'
+ hex.Encode(dst[19:23], uuid[8:10])
+ dst[23] = '-'
+ hex.Encode(dst[24:], uuid[10:])
+}
+
+// Variant returns the variant encoded in uuid.
+func (uuid UUID) Variant() Variant {
+ switch {
+ case (uuid[8] & 0xc0) == 0x80:
+ return RFC4122
+ case (uuid[8] & 0xe0) == 0xc0:
+ return Microsoft
+ case (uuid[8] & 0xe0) == 0xe0:
+ return Future
+ default:
+ return Reserved
+ }
+}
+
+// Version returns the version of uuid.
+func (uuid UUID) Version() Version {
+ return Version(uuid[6] >> 4)
+}
+
+func (v Version) String() string {
+ if v > 15 {
+ return fmt.Sprintf("BAD_VERSION_%d", v)
+ }
+ return fmt.Sprintf("VERSION_%d", v)
+}
+
+func (v Variant) String() string {
+ switch v {
+ case RFC4122:
+ return "RFC4122"
+ case Reserved:
+ return "Reserved"
+ case Microsoft:
+ return "Microsoft"
+ case Future:
+ return "Future"
+ case Invalid:
+ return "Invalid"
+ }
+ return fmt.Sprintf("BadVariant%d", int(v))
+}
+
+// SetRand sets the random number generator to r, which implements io.Reader.
+// If r.Read returns an error when the package requests random data then
+// a panic will be issued.
+//
+// Calling SetRand with nil sets the random number generator to the default
+// generator.
+func SetRand(r io.Reader) {
+ if r == nil {
+ rander = rand.Reader
+ return
+ }
+ rander = r
+}
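
The four encodings Parse accepts, exercised on the same value (the DNS namespace UUID from hash.go):

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	forms := []string{
		"6ba7b810-9dad-11d1-80b4-00c04fd430c8",          // standard
		"urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8", // URN
		"{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",        // Microsoft
		"6ba7b8109dad11d180b400c04fd430c8",              // raw hex
	}
	for _, s := range forms {
		id, err := uuid.Parse(s)
		if err != nil {
			panic(err)
		}
		fmt.Println(id, id.Version(), id.Variant()) // VERSION_1 RFC4122
	}
}
```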
diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go
new file mode 100644
index 0000000..199a1ac
--- /dev/null
+++ b/vendor/github.com/google/uuid/version1.go
@@ -0,0 +1,44 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+)
+
+// NewUUID returns a Version 1 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set, NewUUID returns nil. If the clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current time, NewUUID returns the zero UUID and an error.
+//
+// In most cases, New should be used.
+func NewUUID() (UUID, error) {
+ nodeMu.Lock()
+ if nodeID == zeroID {
+ setNodeInterface("")
+ }
+ nodeMu.Unlock()
+
+ var uuid UUID
+ now, seq, err := GetTime()
+ if err != nil {
+ return uuid, err
+ }
+
+ timeLow := uint32(now & 0xffffffff)
+ timeMid := uint16((now >> 32) & 0xffff)
+ timeHi := uint16((now >> 48) & 0x0fff)
+ timeHi |= 0x1000 // Version 1
+
+ binary.BigEndian.PutUint32(uuid[0:], timeLow)
+ binary.BigEndian.PutUint16(uuid[4:], timeMid)
+ binary.BigEndian.PutUint16(uuid[6:], timeHi)
+ binary.BigEndian.PutUint16(uuid[8:], seq)
+ copy(uuid[10:], nodeID[:])
+
+ return uuid, nil
+}
diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go
new file mode 100644
index 0000000..84af91c
--- /dev/null
+++ b/vendor/github.com/google/uuid/version4.go
@@ -0,0 +1,38 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "io"
+
+// New creates a new random UUID or panics. New is equivalent to
+// the expression
+//
+// uuid.Must(uuid.NewRandom())
+func New() UUID {
+ return Must(NewRandom())
+}
+
+// NewRandom returns a Random (Version 4) UUID.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
+//
+// Randomly generated UUIDs have 122 random bits. One's annual risk of being
+// hit by a meteorite is estimated to be one chance in 17 billion, which
+// means the probability is about 0.00000000006 (6 × 10⁻¹¹),
+// equivalent to the odds of creating a few tens of trillions of UUIDs in a
+// year and having one duplicate.
+func NewRandom() (UUID, error) {
+ var uuid UUID
+ _, err := io.ReadFull(rander, uuid[:])
+ if err != nil {
+ return Nil, err
+ }
+ uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+ uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+ return uuid, nil
+}
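
For reproducible Version 4 UUIDs in tests, SetRand can swap in a deterministic reader. A sketch using math/rand (its Read never fails, so the panic in randomBits is unreachable); not something to do in production code:

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/google/uuid"
)

func main() {
	// Deterministic entropy source for the duration of a test.
	uuid.SetRand(rand.New(rand.NewSource(1)))

	fmt.Println(uuid.New()) // same sequence on every run with this seed
	fmt.Println(uuid.New())

	uuid.SetRand(nil) // restore crypto/rand
}
```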
diff --git a/vendor/github.com/hashicorp/consul/api/LICENSE b/vendor/github.com/hashicorp/consul/api/LICENSE
new file mode 100644
index 0000000..c33dcc7
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/consul/api/README.md b/vendor/github.com/hashicorp/consul/api/README.md
new file mode 100644
index 0000000..3255cbb
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/README.md
@@ -0,0 +1,67 @@
+Consul API client
+=================
+
+The `api` package attempts to provide programmatic access to the full
+Consul API.
+
+Currently, all of the Consul APIs included in version 0.6.0 are supported.
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](https://godoc.org/github.com/hashicorp/consul/api)
+
+Usage
+=====
+
+Below is an example of using the Consul client:
+
+```go
+package main
+
+import "github.com/hashicorp/consul/api"
+import "fmt"
+
+func main() {
+ // Get a new client
+ client, err := api.NewClient(api.DefaultConfig())
+ if err != nil {
+ panic(err)
+ }
+
+ // Get a handle to the KV API
+ kv := client.KV()
+
+ // PUT a new KV pair
+ p := &api.KVPair{Key: "REDIS_MAXCLIENTS", Value: []byte("1000")}
+ _, err = kv.Put(p, nil)
+ if err != nil {
+ panic(err)
+ }
+
+ // Lookup the pair
+ pair, _, err := kv.Get("REDIS_MAXCLIENTS", nil)
+ if err != nil {
+ panic(err)
+ }
+ fmt.Printf("KV: %v %s\n", pair.Key, pair.Value)
+}
+```
+
+To run this example, start a Consul server:
+
+```bash
+consul agent -dev
+```
+
+Copy the code above into a file such as `main.go`.
+
+Install and run. You'll see a key (`REDIS_MAXCLIENTS`) and value (`1000`) printed.
+
+```bash
+$ go get
+$ go run main.go
+KV: REDIS_MAXCLIENTS 1000
+```
+
+After running the code, you can also view the values in the Consul UI on your local machine at http://localhost:8500/ui/dc1/kv
diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go
new file mode 100644
index 0000000..7453feb
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/acl.go
@@ -0,0 +1,1363 @@
+package api
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/url"
+ "time"
+
+ "github.com/mitchellh/mapstructure"
+)
+
+const (
+ // ACLClientType is the client type token
+ ACLClientType = "client"
+
+ // ACLManagementType is the management type token
+ ACLManagementType = "management"
+)
+
+type ACLLink struct {
+ ID string
+ Name string
+}
+
+type ACLTokenPolicyLink = ACLLink
+type ACLTokenRoleLink = ACLLink
+
+// ACLToken represents an ACL Token
+type ACLToken struct {
+ CreateIndex uint64
+ ModifyIndex uint64
+ AccessorID string
+ SecretID string
+ Description string
+ Policies []*ACLTokenPolicyLink `json:",omitempty"`
+ Roles []*ACLTokenRoleLink `json:",omitempty"`
+ ServiceIdentities []*ACLServiceIdentity `json:",omitempty"`
+ NodeIdentities []*ACLNodeIdentity `json:",omitempty"`
+ Local bool
+ AuthMethod string `json:",omitempty"`
+ ExpirationTTL time.Duration `json:",omitempty"`
+ ExpirationTime *time.Time `json:",omitempty"`
+ CreateTime time.Time `json:",omitempty"`
+ Hash []byte `json:",omitempty"`
+
+ // DEPRECATED (ACL-Legacy-Compat)
+ // Rules will only be present for legacy tokens returned via the new APIs
+ Rules string `json:",omitempty"`
+
+ // Namespace is the namespace the ACLToken is associated with.
+ // Namespaces are a Consul Enterprise feature.
+ Namespace string `json:",omitempty"`
+}
+
+type ACLTokenListEntry struct {
+ CreateIndex uint64
+ ModifyIndex uint64
+ AccessorID string
+ Description string
+ Policies []*ACLTokenPolicyLink `json:",omitempty"`
+ Roles []*ACLTokenRoleLink `json:",omitempty"`
+ ServiceIdentities []*ACLServiceIdentity `json:",omitempty"`
+ NodeIdentities []*ACLNodeIdentity `json:",omitempty"`
+ Local bool
+ AuthMethod string `json:",omitempty"`
+ ExpirationTime *time.Time `json:",omitempty"`
+ CreateTime time.Time
+ Hash []byte
+ Legacy bool
+
+ // Namespace is the namespace the ACLTokenListEntry is associated with.
+ // Namespacing is a Consul Enterprise feature.
+ Namespace string `json:",omitempty"`
+}
+
+// ACLEntry is used to represent a legacy ACL token
+// The legacy tokens are deprecated.
+type ACLEntry struct {
+ CreateIndex uint64
+ ModifyIndex uint64
+ ID string
+ Name string
+ Type string
+ Rules string
+}
+
+// ACLReplicationStatus is used to represent the status of ACL replication.
+type ACLReplicationStatus struct {
+ Enabled bool
+ Running bool
+ SourceDatacenter string
+ ReplicationType string
+ ReplicatedIndex uint64
+ ReplicatedRoleIndex uint64
+ ReplicatedTokenIndex uint64
+ LastSuccess time.Time
+ LastError time.Time
+}
+
+// ACLServiceIdentity represents a high-level grant of all necessary privileges
+// to assume the identity of the named Service in the Catalog and within
+// Connect.
+type ACLServiceIdentity struct {
+ ServiceName string
+ Datacenters []string `json:",omitempty"`
+}
+
+// ACLNodeIdentity represents a high-level grant of all necessary privileges
+// to assume the identity of the named Node in the Catalog and within Connect.
+type ACLNodeIdentity struct {
+ NodeName string
+ Datacenter string
+}
+
+// ACLPolicy represents an ACL Policy.
+type ACLPolicy struct {
+ ID string
+ Name string
+ Description string
+ Rules string
+ Datacenters []string
+ Hash []byte
+ CreateIndex uint64
+ ModifyIndex uint64
+
+ // Namespace is the namespace the ACLPolicy is associated with.
+ // Namespacing is a Consul Enterprise feature.
+ Namespace string `json:",omitempty"`
+}
+
+type ACLPolicyListEntry struct {
+ ID string
+ Name string
+ Description string
+ Datacenters []string
+ Hash []byte
+ CreateIndex uint64
+ ModifyIndex uint64
+
+ // Namespace is the namespace the ACLPolicyListEntry is associated with.
+ // Namespacing is a Consul Enterprise feature.
+ Namespace string `json:",omitempty"`
+}
+
+type ACLRolePolicyLink = ACLLink
+
+// ACLRole represents an ACL Role.
+type ACLRole struct {
+ ID string
+ Name string
+ Description string
+ Policies []*ACLRolePolicyLink `json:",omitempty"`
+ ServiceIdentities []*ACLServiceIdentity `json:",omitempty"`
+ NodeIdentities []*ACLNodeIdentity `json:",omitempty"`
+ Hash []byte
+ CreateIndex uint64
+ ModifyIndex uint64
+
+ // Namespace is the namespace the ACLRole is associated with.
+ // Namespacing is a Consul Enterprise feature.
+ Namespace string `json:",omitempty"`
+}
+
+// BindingRuleBindType is the type of binding rule mechanism used.
+type BindingRuleBindType string
+
+const (
+ // BindingRuleBindTypeService binds to a service identity with the given name.
+ BindingRuleBindTypeService BindingRuleBindType = "service"
+
+ // BindingRuleBindTypeRole binds to pre-existing roles with the given name.
+ BindingRuleBindTypeRole BindingRuleBindType = "role"
+)
+
+type ACLBindingRule struct {
+ ID string
+ Description string
+ AuthMethod string
+ Selector string
+ BindType BindingRuleBindType
+ BindName string
+
+ CreateIndex uint64
+ ModifyIndex uint64
+
+ // Namespace is the namespace the ACLBindingRule is associated with.
+ // Namespacing is a Consul Enterprise feature.
+ Namespace string `json:",omitempty"`
+}
+
+type ACLAuthMethod struct {
+ Name string
+ Type string
+ DisplayName string `json:",omitempty"`
+ Description string `json:",omitempty"`
+ MaxTokenTTL time.Duration `json:",omitempty"`
+
+ // TokenLocality defines the kind of token that this auth method produces.
+ // This can be either 'local' or 'global'. If empty, 'local' is assumed.
+ TokenLocality string `json:",omitempty"`
+
+ // Configuration is arbitrary configuration for the auth method. This
+ // should only contain primitive values and containers (such as lists and
+ // maps).
+ Config map[string]interface{}
+
+ CreateIndex uint64
+ ModifyIndex uint64
+
+ // NamespaceRules apply only on auth methods defined in the default namespace.
+ // Namespacing is a Consul Enterprise feature.
+ NamespaceRules []*ACLAuthMethodNamespaceRule `json:",omitempty"`
+
+ // Namespace is the namespace the ACLAuthMethod is associated with.
+ // Namespacing is a Consul Enterprise feature.
+ Namespace string `json:",omitempty"`
+}
+
+func (m *ACLAuthMethod) MarshalJSON() ([]byte, error) {
+ type Alias ACLAuthMethod
+ exported := &struct {
+ MaxTokenTTL string `json:",omitempty"`
+ *Alias
+ }{
+ MaxTokenTTL: m.MaxTokenTTL.String(),
+ Alias: (*Alias)(m),
+ }
+ if m.MaxTokenTTL == 0 {
+ exported.MaxTokenTTL = ""
+ }
+
+ return json.Marshal(exported)
+}
+
+func (m *ACLAuthMethod) UnmarshalJSON(data []byte) error {
+ type Alias ACLAuthMethod
+ aux := &struct {
+ MaxTokenTTL string
+ *Alias
+ }{
+ Alias: (*Alias)(m),
+ }
+ if err := json.Unmarshal(data, &aux); err != nil {
+ return err
+ }
+ var err error
+ if aux.MaxTokenTTL != "" {
+ if m.MaxTokenTTL, err = time.ParseDuration(aux.MaxTokenTTL); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
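+
+// Illustrative sketch (not part of the upstream source): the custom JSON
+// (un)marshalling above round-trips MaxTokenTTL as a Go duration string, so a
+// 5-minute TTL is rendered as "5m0s" and a zero TTL is omitted entirely.
+//
+//	method := ACLAuthMethod{Name: "minikube", Type: "kubernetes", MaxTokenTTL: 5 * time.Minute}
+//	data, _ := json.Marshal(&method) // ...,"MaxTokenTTL":"5m0s",...
+//	var decoded ACLAuthMethod
+//	_ = json.Unmarshal(data, &decoded) // decoded.MaxTokenTTL == 5 * time.Minute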
+
+type ACLAuthMethodNamespaceRule struct {
+ // Selector is an expression that matches against verified identity
+ // attributes returned from the auth method during login.
+ Selector string `json:",omitempty"`
+
+ // BindNamespace is the target namespace of the binding. Can be lightly
+ // templated using HIL ${foo} syntax from available field names.
+ //
+ // If empty it's created in the same namespace as the auth method.
+ BindNamespace string `json:",omitempty"`
+}
+
+type ACLAuthMethodListEntry struct {
+ Name string
+ Type string
+ DisplayName string `json:",omitempty"`
+ Description string `json:",omitempty"`
+ CreateIndex uint64
+ ModifyIndex uint64
+
+ // Namespace is the namespace the ACLAuthMethodListEntry is associated with.
+ // Namespacing is a Consul Enterprise feature.
+ Namespace string `json:",omitempty"`
+}
+
+// ParseKubernetesAuthMethodConfig takes a raw config map and returns a parsed
+// KubernetesAuthMethodConfig.
+func ParseKubernetesAuthMethodConfig(raw map[string]interface{}) (*KubernetesAuthMethodConfig, error) {
+ var config KubernetesAuthMethodConfig
+ decodeConf := &mapstructure.DecoderConfig{
+ Result: &config,
+ WeaklyTypedInput: true,
+ }
+
+ decoder, err := mapstructure.NewDecoder(decodeConf)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := decoder.Decode(raw); err != nil {
+ return nil, fmt.Errorf("error decoding config: %s", err)
+ }
+
+ return &config, nil
+}
+
+// KubernetesAuthMethodConfig is the config for the built-in Consul auth method
+// for Kubernetes.
+type KubernetesAuthMethodConfig struct {
+ Host string `json:",omitempty"`
+ CACert string `json:",omitempty"`
+ ServiceAccountJWT string `json:",omitempty"`
+}
+
+// RenderToConfig converts this into a map[string]interface{} suitable for use
+// in the ACLAuthMethod.Config field.
+func (c *KubernetesAuthMethodConfig) RenderToConfig() map[string]interface{} {
+ return map[string]interface{}{
+ "Host": c.Host,
+ "CACert": c.CACert,
+ "ServiceAccountJWT": c.ServiceAccountJWT,
+ }
+}
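+
+// Illustrative sketch (not part of the upstream source): RenderToConfig and
+// ParseKubernetesAuthMethodConfig are inverses, which is handy when building
+// an ACLAuthMethod programmatically. All values here are placeholders.
+//
+//	kubeConf := &KubernetesAuthMethodConfig{
+//		Host:              "https://192.0.2.10:8443",
+//		CACert:            "<PEM-encoded CA certificate>",
+//		ServiceAccountJWT: "<service account JWT>",
+//	}
+//	method := &ACLAuthMethod{Name: "minikube", Type: "kubernetes", Config: kubeConf.RenderToConfig()}
+//	roundTripped, _ := ParseKubernetesAuthMethodConfig(method.Config)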
+
+// OIDCAuthMethodConfig is the config for the built-in Consul auth method for
+// OIDC and JWT.
+type OIDCAuthMethodConfig struct {
+ // common for type=oidc and type=jwt
+ JWTSupportedAlgs []string `json:",omitempty"`
+ BoundAudiences []string `json:",omitempty"`
+ ClaimMappings map[string]string `json:",omitempty"`
+ ListClaimMappings map[string]string `json:",omitempty"`
+ OIDCDiscoveryURL string `json:",omitempty"`
+ OIDCDiscoveryCACert string `json:",omitempty"`
+ // just for type=oidc
+ OIDCClientID string `json:",omitempty"`
+ OIDCClientSecret string `json:",omitempty"`
+ OIDCScopes []string `json:",omitempty"`
+ AllowedRedirectURIs []string `json:",omitempty"`
+ VerboseOIDCLogging bool `json:",omitempty"`
+ // just for type=jwt
+ JWKSURL string `json:",omitempty"`
+ JWKSCACert string `json:",omitempty"`
+ JWTValidationPubKeys []string `json:",omitempty"`
+ BoundIssuer string `json:",omitempty"`
+ ExpirationLeeway time.Duration `json:",omitempty"`
+ NotBeforeLeeway time.Duration `json:",omitempty"`
+ ClockSkewLeeway time.Duration `json:",omitempty"`
+}
+
+// RenderToConfig converts this into a map[string]interface{} suitable for use
+// in the ACLAuthMethod.Config field.
+func (c *OIDCAuthMethodConfig) RenderToConfig() map[string]interface{} {
+ return map[string]interface{}{
+ // common for type=oidc and type=jwt
+ "JWTSupportedAlgs": c.JWTSupportedAlgs,
+ "BoundAudiences": c.BoundAudiences,
+ "ClaimMappings": c.ClaimMappings,
+ "ListClaimMappings": c.ListClaimMappings,
+ "OIDCDiscoveryURL": c.OIDCDiscoveryURL,
+ "OIDCDiscoveryCACert": c.OIDCDiscoveryCACert,
+ // just for type=oidc
+ "OIDCClientID": c.OIDCClientID,
+ "OIDCClientSecret": c.OIDCClientSecret,
+ "OIDCScopes": c.OIDCScopes,
+ "AllowedRedirectURIs": c.AllowedRedirectURIs,
+ "VerboseOIDCLogging": c.VerboseOIDCLogging,
+ // just for type=jwt
+ "JWKSURL": c.JWKSURL,
+ "JWKSCACert": c.JWKSCACert,
+ "JWTValidationPubKeys": c.JWTValidationPubKeys,
+ "BoundIssuer": c.BoundIssuer,
+ "ExpirationLeeway": c.ExpirationLeeway,
+ "NotBeforeLeeway": c.NotBeforeLeeway,
+ "ClockSkewLeeway": c.ClockSkewLeeway,
+ }
+}
+
+type ACLLoginParams struct {
+ AuthMethod string
+ BearerToken string
+ Meta map[string]string `json:",omitempty"`
+}
+
+type ACLOIDCAuthURLParams struct {
+ AuthMethod string
+ RedirectURI string
+ ClientNonce string
+ Meta map[string]string `json:",omitempty"`
+}
+
+// ACL can be used to query the ACL endpoints
+type ACL struct {
+ c *Client
+}
+
+// ACL returns a handle to the ACL endpoints
+func (c *Client) ACL() *ACL {
+ return &ACL{c}
+}
+
+// Bootstrap is used to perform a one-time ACL bootstrap operation on a cluster
+// to get the first management token.
+func (a *ACL) Bootstrap() (*ACLToken, *WriteMeta, error) {
+ r := a.c.newRequest("PUT", "/v1/acl/bootstrap")
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ var out ACLToken
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return &out, wm, nil
+}
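+
+// Illustrative sketch (not part of the upstream source): bootstrapping yields
+// the cluster's first management token and can only succeed once per cluster.
+// Assumes a client built via api.NewClient(api.DefaultConfig()).
+//
+//	token, _, err := client.ACL().Bootstrap()
+//	if err != nil {
+//		log.Fatal(err) // bootstrap can only succeed once per cluster
+//	}
+//	fmt.Println("management token:", token.SecretID)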
+
+// Create is used to generate a new token with the given parameters
+//
+// Deprecated: Use TokenCreate instead.
+func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) {
+ r := a.c.newRequest("PUT", "/v1/acl/create")
+ r.setWriteOptions(q)
+ r.obj = acl
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return "", nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ var out struct{ ID string }
+ if err := decodeBody(resp, &out); err != nil {
+ return "", nil, err
+ }
+ return out.ID, wm, nil
+}
+
+// Update is used to update the rules of an existing token
+//
+// Deprecated: Use TokenUpdate instead.
+func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) {
+ r := a.c.newRequest("PUT", "/v1/acl/update")
+ r.setWriteOptions(q)
+ r.obj = acl
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ return wm, nil
+}
+
+// Destroy is used to destroy a given ACL token ID
+//
+// Deprecated: Use TokenDelete instead.
+func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
+ r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id)
+ r.setWriteOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ return wm, nil
+}
+
+// Clone is used to return a new token cloned from an existing one
+//
+// Deprecated: Use TokenClone instead.
+func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) {
+ r := a.c.newRequest("PUT", "/v1/acl/clone/"+id)
+ r.setWriteOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return "", nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ var out struct{ ID string }
+ if err := decodeBody(resp, &out); err != nil {
+ return "", nil, err
+ }
+ return out.ID, wm, nil
+}
+
+// Info is used to query for information about an ACL token
+//
+// Deprecated: Use TokenRead instead.
+func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) {
+ r := a.c.newRequest("GET", "/v1/acl/info/"+id)
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var entries []*ACLEntry
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, err
+ }
+ if len(entries) > 0 {
+ return entries[0], qm, nil
+ }
+ return nil, qm, nil
+}
+
+// List is used to get all the ACL tokens
+//
+// Deprecated: Use TokenList instead.
+func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) {
+ r := a.c.newRequest("GET", "/v1/acl/list")
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var entries []*ACLEntry
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, err
+ }
+ return entries, qm, nil
+}
+
+// Replication returns the status of the ACL replication process in the datacenter
+func (a *ACL) Replication(q *QueryOptions) (*ACLReplicationStatus, *QueryMeta, error) {
+ r := a.c.newRequest("GET", "/v1/acl/replication")
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var entries *ACLReplicationStatus
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, err
+ }
+ return entries, qm, nil
+}
+
+// TokenCreate creates a new ACL token. If either the AccessorID or SecretID fields
+// of the ACLToken structure are empty they will be filled in by Consul.
+func (a *ACL) TokenCreate(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) {
+ r := a.c.newRequest("PUT", "/v1/acl/token")
+ r.setWriteOptions(q)
+ r.obj = token
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ var out ACLToken
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+
+ return &out, wm, nil
+}
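+
+// Illustrative sketch (not part of the upstream source): creating a token and
+// letting Consul generate the AccessorID/SecretID. The Policies field and the
+// ACLTokenPolicyLink alias are assumed from the ACLToken declaration elsewhere
+// in this package.
+//
+//	token, _, err := client.ACL().TokenCreate(&ACLToken{
+//		Description: "token for the web service",
+//		Policies:    []*ACLTokenPolicyLink{{Name: "web-policy"}},
+//	}, nil)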
+
+// TokenUpdate updates a token in place without modifying its AccessorID or SecretID. A valid
+// AccessorID must be set in the ACLToken structure passed to this function but the SecretID may
+// be omitted and will be filled in by Consul with its existing value.
+func (a *ACL) TokenUpdate(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) {
+ if token.AccessorID == "" {
+ return nil, nil, fmt.Errorf("Must specify an AccessorID for Token Updating")
+ }
+ r := a.c.newRequest("PUT", "/v1/acl/token/"+token.AccessorID)
+ r.setWriteOptions(q)
+ r.obj = token
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ var out ACLToken
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+
+ return &out, wm, nil
+}
+
+// TokenClone will create a new token with the same policies and locality as the original
+// token but will have its own auto-generated AccessorID and SecretID, as well as the
+// description passed to this function. The tokenID parameter must be a valid Accessor ID
+// of an existing token.
+func (a *ACL) TokenClone(tokenID string, description string, q *WriteOptions) (*ACLToken, *WriteMeta, error) {
+ if tokenID == "" {
+ return nil, nil, fmt.Errorf("Must specify a tokenID for Token Cloning")
+ }
+
+ r := a.c.newRequest("PUT", "/v1/acl/token/"+tokenID+"/clone")
+ r.setWriteOptions(q)
+ r.obj = struct{ Description string }{description}
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ var out ACLToken
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+
+ return &out, wm, nil
+}
+
+// TokenDelete removes a single ACL token. The tokenID parameter must be a valid
+// Accessor ID of an existing token.
+func (a *ACL) TokenDelete(tokenID string, q *WriteOptions) (*WriteMeta, error) {
+ r := a.c.newRequest("DELETE", "/v1/acl/token/"+tokenID)
+ r.setWriteOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ return wm, nil
+}
+
+// TokenRead retrieves the full token details. The tokenID parameter must be a valid
+// Accessor ID of an existing token.
+func (a *ACL) TokenRead(tokenID string, q *QueryOptions) (*ACLToken, *QueryMeta, error) {
+ r := a.c.newRequest("GET", "/v1/acl/token/"+tokenID)
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out ACLToken
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+
+ return &out, qm, nil
+}
+
+// TokenReadSelf retrieves the full token details of the token currently
+// assigned to the API Client. In this manner it's possible to read a token
+// by its Secret ID.
+func (a *ACL) TokenReadSelf(q *QueryOptions) (*ACLToken, *QueryMeta, error) {
+ r := a.c.newRequest("GET", "/v1/acl/token/self")
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out ACLToken
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+
+ return &out, qm, nil
+}
+
+// TokenList lists all tokens. The listing does not contain any SecretIDs as those
+// may only be retrieved by a call to TokenRead.
+func (a *ACL) TokenList(q *QueryOptions) ([]*ACLTokenListEntry, *QueryMeta, error) {
+ r := a.c.newRequest("GET", "/v1/acl/tokens")
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var entries []*ACLTokenListEntry
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, err
+ }
+ return entries, qm, nil
+}
+
+// PolicyCreate will create a new policy. The policy parameter's ID field must not
+// be set, as it will be generated by Consul while processing the request.
+func (a *ACL) PolicyCreate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *WriteMeta, error) {
+ if policy.ID != "" {
+ return nil, nil, fmt.Errorf("Cannot specify an ID in Policy Creation")
+ }
+ r := a.c.newRequest("PUT", "/v1/acl/policy")
+ r.setWriteOptions(q)
+ r.obj = policy
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ var out ACLPolicy
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+
+ return &out, wm, nil
+}
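+
+// Illustrative sketch (not part of the upstream source): creating a policy
+// with a small HCL rule set; Consul fills in the generated ID on the response.
+//
+//	policy, _, err := client.ACL().PolicyCreate(&ACLPolicy{
+//		Name:        "web-policy",
+//		Description: "grants read access to the web service",
+//		Rules:       `service "web" { policy = "read" }`,
+//	}, nil)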
+
+// PolicyUpdate updates a policy. The ID field of the policy parameter must be set to an
+// existing policy ID.
+func (a *ACL) PolicyUpdate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *WriteMeta, error) {
+ if policy.ID == "" {
+ return nil, nil, fmt.Errorf("Must specify an ID in Policy Update")
+ }
+
+ r := a.c.newRequest("PUT", "/v1/acl/policy/"+policy.ID)
+ r.setWriteOptions(q)
+ r.obj = policy
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ var out ACLPolicy
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+
+ return &out, wm, nil
+}
+
+// PolicyDelete deletes a policy given its ID.
+func (a *ACL) PolicyDelete(policyID string, q *WriteOptions) (*WriteMeta, error) {
+ r := a.c.newRequest("DELETE", "/v1/acl/policy/"+policyID)
+ r.setWriteOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ return wm, nil
+}
+
+// PolicyRead retrieves the policy details including the rule set.
+func (a *ACL) PolicyRead(policyID string, q *QueryOptions) (*ACLPolicy, *QueryMeta, error) {
+ r := a.c.newRequest("GET", "/v1/acl/policy/"+policyID)
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out ACLPolicy
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+
+ return &out, qm, nil
+}
+
+// PolicyReadByName retrieves the policy details, including the rule set, by name.
+func (a *ACL) PolicyReadByName(policyName string, q *QueryOptions) (*ACLPolicy, *QueryMeta, error) {
+ r := a.c.newRequest("GET", "/v1/acl/policy/name/"+url.QueryEscape(policyName))
+ r.setQueryOptions(q)
+ found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ if !found {
+ return nil, qm, nil
+ }
+
+ var out ACLPolicy
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+
+ return &out, qm, nil
+}
+
+// PolicyList retrieves a listing of all policies. The listing does not include the
+// rules for any policy as those should be retrieved by subsequent calls to PolicyRead.
+func (a *ACL) PolicyList(q *QueryOptions) ([]*ACLPolicyListEntry, *QueryMeta, error) {
+ r := a.c.newRequest("GET", "/v1/acl/policies")
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var entries []*ACLPolicyListEntry
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, err
+ }
+ return entries, qm, nil
+}
+
+// RulesTranslate translates the legacy rule syntax into the current syntax.
+//
+// Deprecated: Support for the legacy syntax translation will be removed
+// when legacy ACL support is removed.
+func (a *ACL) RulesTranslate(rules io.Reader) (string, error) {
+ r := a.c.newRequest("POST", "/v1/acl/rules/translate")
+ r.body = rules
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ ruleBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return "", fmt.Errorf("Failed to read translated rule body: %v", err)
+ }
+
+ return string(ruleBytes), nil
+}
+
+// RulesTranslateToken translates the rules associated with the legacy syntax
+// into the current syntax and returns the results.
+//
+// Deprecated: Support for the legacy syntax translation will be removed
+// when legacy ACL support is removed.
+func (a *ACL) RulesTranslateToken(tokenID string) (string, error) {
+ r := a.c.newRequest("GET", "/v1/acl/rules/translate/"+tokenID)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ ruleBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return "", fmt.Errorf("Failed to read translated rule body: %v", err)
+ }
+
+ return string(ruleBytes), nil
+}
+
+// RoleCreate will create a new role. The role parameter's ID field must not be
+// set, as it will be generated by Consul while processing the request.
+func (a *ACL) RoleCreate(role *ACLRole, q *WriteOptions) (*ACLRole, *WriteMeta, error) {
+ if role.ID != "" {
+ return nil, nil, fmt.Errorf("Cannot specify an ID in Role Creation")
+ }
+
+ r := a.c.newRequest("PUT", "/v1/acl/role")
+ r.setWriteOptions(q)
+ r.obj = role
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ var out ACLRole
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+
+ return &out, wm, nil
+}
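+
+// Illustrative sketch (not part of the upstream source): a role bundles policy
+// links and service identities so they can be attached to tokens as one unit.
+// ACLRolePolicyLink is the ACLLink alias declared above; linking by Name and
+// the ServiceName field on ACLServiceIdentity are assumed from their
+// declarations earlier in this file.
+//
+//	role, _, err := client.ACL().RoleCreate(&ACLRole{
+//		Name:              "web-role",
+//		Policies:          []*ACLRolePolicyLink{{Name: "web-policy"}},
+//		ServiceIdentities: []*ACLServiceIdentity{{ServiceName: "web"}},
+//	}, nil)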
+
+// RoleUpdate updates a role. The ID field of the role parameter must be set to an
+// existing role ID.
+func (a *ACL) RoleUpdate(role *ACLRole, q *WriteOptions) (*ACLRole, *WriteMeta, error) {
+ if role.ID == "" {
+ return nil, nil, fmt.Errorf("Must specify an ID in Role Update")
+ }
+
+ r := a.c.newRequest("PUT", "/v1/acl/role/"+role.ID)
+ r.setWriteOptions(q)
+ r.obj = role
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ var out ACLRole
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+
+ return &out, wm, nil
+}
+
+// RoleDelete deletes a role given its ID.
+func (a *ACL) RoleDelete(roleID string, q *WriteOptions) (*WriteMeta, error) {
+ r := a.c.newRequest("DELETE", "/v1/acl/role/"+roleID)
+ r.setWriteOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ return wm, nil
+}
+
+// RoleRead retrieves the role details (by ID). Returns nil if not found.
+func (a *ACL) RoleRead(roleID string, q *QueryOptions) (*ACLRole, *QueryMeta, error) {
+ r := a.c.newRequest("GET", "/v1/acl/role/"+roleID)
+ r.setQueryOptions(q)
+ found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ if !found {
+ return nil, qm, nil
+ }
+
+ var out ACLRole
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+
+ return &out, qm, nil
+}
+
+// RoleReadByName retrieves the role details (by name). Returns nil if not found.
+func (a *ACL) RoleReadByName(roleName string, q *QueryOptions) (*ACLRole, *QueryMeta, error) {
+ r := a.c.newRequest("GET", "/v1/acl/role/name/"+url.QueryEscape(roleName))
+ r.setQueryOptions(q)
+ found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ if !found {
+ return nil, qm, nil
+ }
+
+ var out ACLRole
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+
+ return &out, qm, nil
+}
+
+// RoleList retrieves a listing of all roles. The listing does not include some
+// metadata for the role, as that should be retrieved by subsequent calls to
+// RoleRead.
+func (a *ACL) RoleList(q *QueryOptions) ([]*ACLRole, *QueryMeta, error) {
+ r := a.c.newRequest("GET", "/v1/acl/roles")
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var entries []*ACLRole
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, err
+ }
+ return entries, qm, nil
+}
+
+// AuthMethodCreate will create a new auth method.
+func (a *ACL) AuthMethodCreate(method *ACLAuthMethod, q *WriteOptions) (*ACLAuthMethod, *WriteMeta, error) {
+ if method.Name == "" {
+ return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Creation")
+ }
+
+ r := a.c.newRequest("PUT", "/v1/acl/auth-method")
+ r.setWriteOptions(q)
+ r.obj = method
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ var out ACLAuthMethod
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+
+ return &out, wm, nil
+}
+
+// AuthMethodUpdate updates an auth method.
+func (a *ACL) AuthMethodUpdate(method *ACLAuthMethod, q *WriteOptions) (*ACLAuthMethod, *WriteMeta, error) {
+ if method.Name == "" {
+ return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Update")
+ }
+
+ r := a.c.newRequest("PUT", "/v1/acl/auth-method/"+url.QueryEscape(method.Name))
+ r.setWriteOptions(q)
+ r.obj = method
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ var out ACLAuthMethod
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+
+ return &out, wm, nil
+}
+
+// AuthMethodDelete deletes an auth method given its Name.
+func (a *ACL) AuthMethodDelete(methodName string, q *WriteOptions) (*WriteMeta, error) {
+ if methodName == "" {
+ return nil, fmt.Errorf("Must specify a Name in Auth Method Delete")
+ }
+
+ r := a.c.newRequest("DELETE", "/v1/acl/auth-method/"+url.QueryEscape(methodName))
+ r.setWriteOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ return wm, nil
+}
+
+// AuthMethodRead retrieves the auth method. Returns nil if not found.
+func (a *ACL) AuthMethodRead(methodName string, q *QueryOptions) (*ACLAuthMethod, *QueryMeta, error) {
+ if methodName == "" {
+ return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Read")
+ }
+
+ r := a.c.newRequest("GET", "/v1/acl/auth-method/"+url.QueryEscape(methodName))
+ r.setQueryOptions(q)
+ found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ if !found {
+ return nil, qm, nil
+ }
+
+ var out ACLAuthMethod
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+
+ return &out, qm, nil
+}
+
+// AuthMethodList retrieves a listing of all auth methods. The listing does not
+// include some metadata for the auth method as those should be retrieved by
+// subsequent calls to AuthMethodRead.
+func (a *ACL) AuthMethodList(q *QueryOptions) ([]*ACLAuthMethodListEntry, *QueryMeta, error) {
+ r := a.c.newRequest("GET", "/v1/acl/auth-methods")
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var entries []*ACLAuthMethodListEntry
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, err
+ }
+ return entries, qm, nil
+}
+
+// BindingRuleCreate will create a new binding rule. It is not allowed for the
+// binding rule parameter's ID field to be set as this will be generated by
+// Consul while processing the request.
+func (a *ACL) BindingRuleCreate(rule *ACLBindingRule, q *WriteOptions) (*ACLBindingRule, *WriteMeta, error) {
+ if rule.ID != "" {
+ return nil, nil, fmt.Errorf("Cannot specify an ID in Binding Rule Creation")
+ }
+
+ r := a.c.newRequest("PUT", "/v1/acl/binding-rule")
+ r.setWriteOptions(q)
+ r.obj = rule
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ var out ACLBindingRule
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+
+ return &out, wm, nil
+}
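+
+// Illustrative sketch (not part of the upstream source): a binding rule that
+// maps Kubernetes service accounts logged in through a hypothetical "minikube"
+// auth method to service identities named after the account.
+//
+//	rule, _, err := client.ACL().BindingRuleCreate(&ACLBindingRule{
+//		AuthMethod: "minikube",
+//		Selector:   "serviceaccount.namespace==default",
+//		BindType:   BindingRuleBindTypeService,
+//		BindName:   "${serviceaccount.name}",
+//	}, nil)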
+
+// BindingRuleUpdate updates a binding rule. The ID field of the binding rule
+// parameter must be set to an existing binding rule ID.
+func (a *ACL) BindingRuleUpdate(rule *ACLBindingRule, q *WriteOptions) (*ACLBindingRule, *WriteMeta, error) {
+ if rule.ID == "" {
+ return nil, nil, fmt.Errorf("Must specify an ID in Binding Rule Update")
+ }
+
+ r := a.c.newRequest("PUT", "/v1/acl/binding-rule/"+rule.ID)
+ r.setWriteOptions(q)
+ r.obj = rule
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ var out ACLBindingRule
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+
+ return &out, wm, nil
+}
+
+// BindingRuleDelete deletes a binding rule given its ID.
+func (a *ACL) BindingRuleDelete(bindingRuleID string, q *WriteOptions) (*WriteMeta, error) {
+ r := a.c.newRequest("DELETE", "/v1/acl/binding-rule/"+bindingRuleID)
+ r.setWriteOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ return wm, nil
+}
+
+// BindingRuleRead retrieves the binding rule details. Returns nil if not found.
+func (a *ACL) BindingRuleRead(bindingRuleID string, q *QueryOptions) (*ACLBindingRule, *QueryMeta, error) {
+ r := a.c.newRequest("GET", "/v1/acl/binding-rule/"+bindingRuleID)
+ r.setQueryOptions(q)
+ found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ if !found {
+ return nil, qm, nil
+ }
+
+ var out ACLBindingRule
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+
+ return &out, qm, nil
+}
+
+// BindingRuleList retrieves a listing of all binding rules.
+func (a *ACL) BindingRuleList(methodName string, q *QueryOptions) ([]*ACLBindingRule, *QueryMeta, error) {
+ r := a.c.newRequest("GET", "/v1/acl/binding-rules")
+ if methodName != "" {
+ r.params.Set("authmethod", methodName)
+ }
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var entries []*ACLBindingRule
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, err
+ }
+ return entries, qm, nil
+}
+
+// Login is used to exchange auth method credentials for a newly-minted Consul Token.
+func (a *ACL) Login(auth *ACLLoginParams, q *WriteOptions) (*ACLToken, *WriteMeta, error) {
+ r := a.c.newRequest("POST", "/v1/acl/login")
+ r.setWriteOptions(q)
+ r.obj = auth
+
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ var out ACLToken
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return &out, wm, nil
+}
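+
+// Illustrative sketch (not part of the upstream source): exchanging a bearer
+// credential (here a Kubernetes service account JWT read from its conventional
+// in-pod path) for a Consul token via an auth method named "minikube".
+//
+//	jwt, _ := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token")
+//	token, _, err := client.ACL().Login(&ACLLoginParams{
+//		AuthMethod:  "minikube",
+//		BearerToken: string(jwt),
+//	}, nil)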
+
+// Logout is used to destroy a Consul Token created via Login().
+func (a *ACL) Logout(q *WriteOptions) (*WriteMeta, error) {
+ r := a.c.newRequest("POST", "/v1/acl/logout")
+ r.setWriteOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ return wm, nil
+}
+
+// OIDCAuthURL requests an authorization URL to start an OIDC login flow.
+func (a *ACL) OIDCAuthURL(auth *ACLOIDCAuthURLParams, q *WriteOptions) (string, *WriteMeta, error) {
+ if auth.AuthMethod == "" {
+ return "", nil, fmt.Errorf("Must specify an auth method name")
+ }
+
+ r := a.c.newRequest("POST", "/v1/acl/oidc/auth-url")
+ r.setWriteOptions(q)
+ r.obj = auth
+
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return "", nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ var out aclOIDCAuthURLResponse
+ if err := decodeBody(resp, &out); err != nil {
+ return "", nil, err
+ }
+ return out.AuthURL, wm, nil
+}
+
+type aclOIDCAuthURLResponse struct {
+ AuthURL string
+}
+
+type ACLOIDCCallbackParams struct {
+ AuthMethod string
+ State string
+ Code string
+ ClientNonce string
+}
+
+// OIDCCallback is the callback endpoint to complete an OIDC login.
+func (a *ACL) OIDCCallback(auth *ACLOIDCCallbackParams, q *WriteOptions) (*ACLToken, *WriteMeta, error) {
+ if auth.AuthMethod == "" {
+ return nil, nil, fmt.Errorf("Must specify an auth method name")
+ }
+
+ r := a.c.newRequest("POST", "/v1/acl/oidc/callback")
+ r.setWriteOptions(q)
+ r.obj = auth
+
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ var out ACLToken
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return &out, wm, nil
+}
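+
+// Illustrative sketch (not part of the upstream source): the two-step OIDC
+// flow. The user visits the returned URL, and the identity provider redirects
+// back with the state and code that complete the login. The method name,
+// redirect URI, and state/code variables are placeholders.
+//
+//	authURL, _, _ := client.ACL().OIDCAuthURL(&ACLOIDCAuthURLParams{
+//		AuthMethod:  "auth0",
+//		RedirectURI: "http://localhost:8550/oidc/callback",
+//		ClientNonce: "random-nonce",
+//	}, nil)
+//	// ... direct the user to authURL, then handle the redirect:
+//	token, _, err := client.ACL().OIDCCallback(&ACLOIDCCallbackParams{
+//		AuthMethod:  "auth0",
+//		State:       stateFromRedirect,
+//		Code:        codeFromRedirect,
+//		ClientNonce: "random-nonce",
+//	}, nil)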
diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go
new file mode 100644
index 0000000..3e11368
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/agent.go
@@ -0,0 +1,1139 @@
+package api
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+)
+
+// ServiceKind is the kind of service being registered.
+type ServiceKind string
+
+const (
+ // ServiceKindTypical is a typical, classic Consul service. This is
+ // represented by the absence of a value. This was chosen for ease of
+ // backwards compatibility: existing services in the catalog would
+ // default to the typical service.
+ ServiceKindTypical ServiceKind = ""
+
+ // ServiceKindConnectProxy is a proxy for the Connect feature. This
+ // service proxies another service within Consul and speaks the connect
+ // protocol.
+ ServiceKindConnectProxy ServiceKind = "connect-proxy"
+
+ // ServiceKindMeshGateway is a Mesh Gateway for the Connect feature. This
+	// service will proxy connections based on the SNI header set by other
+ // connect proxies
+ ServiceKindMeshGateway ServiceKind = "mesh-gateway"
+
+ // ServiceKindTerminatingGateway is a Terminating Gateway for the Connect
+ // feature. This service will proxy connections to services outside the mesh.
+ ServiceKindTerminatingGateway ServiceKind = "terminating-gateway"
+
+ // ServiceKindIngressGateway is an Ingress Gateway for the Connect feature.
+	// This service will ingress connections based on configuration defined in
+ // the ingress-gateway config entry.
+ ServiceKindIngressGateway ServiceKind = "ingress-gateway"
+)
+
+// UpstreamDestType is the type of upstream discovery mechanism.
+type UpstreamDestType string
+
+const (
+ // UpstreamDestTypeService discovers instances via healthy service lookup.
+ UpstreamDestTypeService UpstreamDestType = "service"
+
+ // UpstreamDestTypePreparedQuery discovers instances via prepared query
+ // execution.
+ UpstreamDestTypePreparedQuery UpstreamDestType = "prepared_query"
+)
+
+// AgentCheck represents a check known to the agent
+type AgentCheck struct {
+ Node string
+ CheckID string
+ Name string
+ Status string
+ Notes string
+ Output string
+ ServiceID string
+ ServiceName string
+ Type string
+ Definition HealthCheckDefinition
+ Namespace string `json:",omitempty"`
+}
+
+// AgentWeights represent optional weights for a service
+type AgentWeights struct {
+ Passing int
+ Warning int
+}
+
+// AgentService represents a service known to the agent
+type AgentService struct {
+ Kind ServiceKind `json:",omitempty"`
+ ID string
+ Service string
+ Tags []string
+ Meta map[string]string
+ Port int
+ Address string
+ TaggedAddresses map[string]ServiceAddress `json:",omitempty"`
+ Weights AgentWeights
+ EnableTagOverride bool
+ CreateIndex uint64 `json:",omitempty" bexpr:"-"`
+ ModifyIndex uint64 `json:",omitempty" bexpr:"-"`
+ ContentHash string `json:",omitempty" bexpr:"-"`
+ Proxy *AgentServiceConnectProxyConfig `json:",omitempty"`
+ Connect *AgentServiceConnect `json:",omitempty"`
+ // NOTE: If we ever set the ContentHash outside of singular service lookup then we may need
+ // to include the Namespace in the hash. When we do, then we are in for lots of fun with tests.
+ // For now though, ignoring it works well enough.
+ Namespace string `json:",omitempty" bexpr:"-" hash:"ignore"`
+}
+
+// AgentServiceChecksInfo returns information about a Service and its checks
+type AgentServiceChecksInfo struct {
+ AggregatedStatus string
+ Service *AgentService
+ Checks HealthChecks
+}
+
+// AgentServiceConnect represents the Connect configuration of a service.
+type AgentServiceConnect struct {
+ Native bool `json:",omitempty"`
+ SidecarService *AgentServiceRegistration `json:",omitempty" bexpr:"-"`
+}
+
+// AgentServiceConnectProxyConfig is the proxy configuration in a connect-proxy
+// ServiceDefinition or response.
+type AgentServiceConnectProxyConfig struct {
+ DestinationServiceName string `json:",omitempty"`
+ DestinationServiceID string `json:",omitempty"`
+ LocalServiceAddress string `json:",omitempty"`
+ LocalServicePort int `json:",omitempty"`
+ Config map[string]interface{} `json:",omitempty" bexpr:"-"`
+ Upstreams []Upstream `json:",omitempty"`
+ MeshGateway MeshGatewayConfig `json:",omitempty"`
+ Expose ExposeConfig `json:",omitempty"`
+}
+
+const (
+ // MemberTagKeyACLMode is the key used to indicate what ACL mode the agent is
+ // operating in. The values of this key will be one of the MemberACLMode constants
+ // with the key not being present indicating ACLModeUnknown.
+ MemberTagKeyACLMode = "acls"
+
+	// MemberTagKeyRole is the key used to indicate whether the member is a server or not.
+ MemberTagKeyRole = "role"
+
+ // MemberTagValueRoleServer is the value of the MemberTagKeyRole used to indicate
+ // that the member represents a Consul server.
+ MemberTagValueRoleServer = "consul"
+
+ // MemberTagKeySegment is the key name of the tag used to indicate which network
+ // segment this member is in.
+ // Network Segments are a Consul Enterprise feature.
+ MemberTagKeySegment = "segment"
+
+ // MemberTagKeyBootstrap is the key name of the tag used to indicate whether this
+ // agent was started with the "bootstrap" configuration enabled
+ MemberTagKeyBootstrap = "bootstrap"
+ // MemberTagValueBootstrap is the value of the MemberTagKeyBootstrap key when the
+ // agent was started with the "bootstrap" configuration enabled.
+ MemberTagValueBootstrap = "1"
+
+ // MemberTagKeyBootstrapExpect is the key name of the tag used to indicate whether
+ // this agent was started with the "bootstrap_expect" configuration set to a non-zero
+	// value. The value of this key will be the string form of that configuration value.
+ MemberTagKeyBootstrapExpect = "expect"
+
+	// MemberTagKeyUseTLS is the key name of the tag used to indicate whether this agent
+ // was configured to use TLS.
+ MemberTagKeyUseTLS = "use_tls"
+ // MemberTagValueUseTLS is the value of the MemberTagKeyUseTLS when the agent was
+	// configured to use TLS. Any other value indicates that it was not set up in
+ // that manner.
+ MemberTagValueUseTLS = "1"
+
+ // MemberTagKeyReadReplica is the key used to indicate that the member is a read
+ // replica server (will remain a Raft non-voter).
+ // Read Replicas are a Consul Enterprise feature.
+ MemberTagKeyReadReplica = "nonvoter"
+ // MemberTagValueReadReplica is the value of the MemberTagKeyReadReplica key when
+ // the member is in fact a read-replica. Any other value indicates that it is not.
+ // Read Replicas are a Consul Enterprise feature.
+ MemberTagValueReadReplica = "1"
+)
+
+type MemberACLMode string
+
+const (
+	// ACLModeDisabled indicates that ACLs are disabled for this agent
+ ACLModeDisabled MemberACLMode = "0"
+ // ACLModeEnabled indicates that ACLs are enabled and operating in new ACL
+ // mode (v1.4.0+ ACLs)
+ ACLModeEnabled MemberACLMode = "1"
+ // ACLModeLegacy indicates that ACLs are enabled and operating in legacy mode.
+ ACLModeLegacy MemberACLMode = "2"
+	// ACLModeUnknown is used to indicate that the AgentMember.Tags didn't advertise
+ // an ACL mode at all. This is the case for Consul versions before v1.4.0 and
+ // should be treated similarly to ACLModeLegacy.
+ ACLModeUnknown MemberACLMode = "3"
+)
+
+// AgentMember represents a cluster member known to the agent
+type AgentMember struct {
+ Name string
+ Addr string
+ Port uint16
+ Tags map[string]string
+ Status int
+ ProtocolMin uint8
+ ProtocolMax uint8
+ ProtocolCur uint8
+ DelegateMin uint8
+ DelegateMax uint8
+ DelegateCur uint8
+}
+
+// ACLMode returns the ACL mode this agent is operating in.
+func (m *AgentMember) ACLMode() MemberACLMode {
+ mode := m.Tags[MemberTagKeyACLMode]
+
+ // the key may not have existed but then an
+ // empty string will be returned and we will
+ // handle that in the default case of the switch
+ switch MemberACLMode(mode) {
+ case ACLModeDisabled:
+ return ACLModeDisabled
+ case ACLModeEnabled:
+ return ACLModeEnabled
+ case ACLModeLegacy:
+ return ACLModeLegacy
+ default:
+ return ACLModeUnknown
+ }
+}
+
+// IsConsulServer returns true when this member is a Consul server.
+func (m *AgentMember) IsConsulServer() bool {
+ return m.Tags[MemberTagKeyRole] == MemberTagValueRoleServer
+}
+
+// AllSegments is used to select for all segments in MembersOpts.
+const AllSegments = "_all"
+
+// MembersOpts is used for querying member information.
+type MembersOpts struct {
+ // WAN is whether to show members from the WAN.
+ WAN bool
+
+ // Segment is the LAN segment to show members for. Setting this to the
+ // AllSegments value above will show members in all segments.
+ Segment string
+}
+
+// AgentServiceRegistration is used to register a new service
+type AgentServiceRegistration struct {
+ Kind ServiceKind `json:",omitempty"`
+ ID string `json:",omitempty"`
+ Name string `json:",omitempty"`
+ Tags []string `json:",omitempty"`
+ Port int `json:",omitempty"`
+ Address string `json:",omitempty"`
+ TaggedAddresses map[string]ServiceAddress `json:",omitempty"`
+ EnableTagOverride bool `json:",omitempty"`
+ Meta map[string]string `json:",omitempty"`
+ Weights *AgentWeights `json:",omitempty"`
+ Check *AgentServiceCheck
+ Checks AgentServiceChecks
+ Proxy *AgentServiceConnectProxyConfig `json:",omitempty"`
+ Connect *AgentServiceConnect `json:",omitempty"`
+ Namespace string `json:",omitempty" bexpr:"-" hash:"ignore"`
+}
+
+// ServiceRegisterOpts is used to pass extra options to the service register.
+type ServiceRegisterOpts struct {
+	// Missing health checks will be deleted from the agent.
+	// Using this parameter allows a service and its checks to be registered
+	// idempotently, without having to manually deregister checks.
+	ReplaceExistingChecks bool
+}
+
+// AgentCheckRegistration is used to register a new check
+type AgentCheckRegistration struct {
+ ID string `json:",omitempty"`
+ Name string `json:",omitempty"`
+ Notes string `json:",omitempty"`
+ ServiceID string `json:",omitempty"`
+ AgentServiceCheck
+ Namespace string `json:",omitempty"`
+}
+
+// AgentServiceCheck is used to define a node or service level check
+type AgentServiceCheck struct {
+ CheckID string `json:",omitempty"`
+ Name string `json:",omitempty"`
+ Args []string `json:"ScriptArgs,omitempty"`
+ DockerContainerID string `json:",omitempty"`
+ Shell string `json:",omitempty"` // Only supported for Docker.
+ Interval string `json:",omitempty"`
+ Timeout string `json:",omitempty"`
+ TTL string `json:",omitempty"`
+ HTTP string `json:",omitempty"`
+ Header map[string][]string `json:",omitempty"`
+ Method string `json:",omitempty"`
+ Body string `json:",omitempty"`
+ TCP string `json:",omitempty"`
+ Status string `json:",omitempty"`
+ Notes string `json:",omitempty"`
+ TLSSkipVerify bool `json:",omitempty"`
+ GRPC string `json:",omitempty"`
+ GRPCUseTLS bool `json:",omitempty"`
+ AliasNode string `json:",omitempty"`
+ AliasService string `json:",omitempty"`
+ SuccessBeforePassing int `json:",omitempty"`
+ FailuresBeforeCritical int `json:",omitempty"`
+
+ // In Consul 0.7 and later, checks that are associated with a service
+ // may also contain this optional DeregisterCriticalServiceAfter field,
+ // which is a timeout in the same Go time format as Interval and TTL. If
+ // a check is in the critical state for more than this configured value,
+ // then its associated service (and all of its associated checks) will
+ // automatically be deregistered.
+ DeregisterCriticalServiceAfter string `json:",omitempty"`
+}
+type AgentServiceChecks []*AgentServiceCheck
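+
+// Illustrative sketch (not part of the upstream source): an HTTP check polled
+// every 10s whose service is deregistered after an hour in the critical state.
+// The endpoint URL is a placeholder.
+//
+//	check := &AgentServiceCheck{
+//		HTTP:                           "http://127.0.0.1:8000/health",
+//		Interval:                       "10s",
+//		Timeout:                        "1s",
+//		DeregisterCriticalServiceAfter: "1h",
+//	}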
+
+// AgentToken is used when updating ACL tokens for an agent.
+type AgentToken struct {
+ Token string
+}
+
+// MetricsInfo is used to store different types of metric values from the agent.
+type MetricsInfo struct {
+ Timestamp string
+ Gauges []GaugeValue
+ Points []PointValue
+ Counters []SampledValue
+ Samples []SampledValue
+}
+
+// GaugeValue stores one value that is updated as time goes on, such as
+// the amount of memory allocated.
+type GaugeValue struct {
+ Name string
+ Value float32
+ Labels map[string]string
+}
+
+// PointValue holds a series of points for a metric.
+type PointValue struct {
+ Name string
+ Points []float32
+}
+
+// SampledValue stores info about a metric that is incremented over time,
+// such as the number of requests to an HTTP endpoint.
+type SampledValue struct {
+ Name string
+ Count int
+ Sum float64
+ Min float64
+ Max float64
+ Mean float64
+ Stddev float64
+ Labels map[string]string
+}
+
+// AgentAuthorizeParams are the request parameters for authorizing a request.
+type AgentAuthorizeParams struct {
+ Target string
+ ClientCertURI string
+ ClientCertSerial string
+}
+
+// AgentAuthorize is the response structure for Connect authorization.
+type AgentAuthorize struct {
+ Authorized bool
+ Reason string
+}
+
+// ConnectProxyConfig is the response structure for agent-local proxy
+// configuration.
+type ConnectProxyConfig struct {
+ ProxyServiceID string
+ TargetServiceID string
+ TargetServiceName string
+ ContentHash string
+ Config map[string]interface{} `bexpr:"-"`
+ Upstreams []Upstream
+}
+
+// Upstream is the response structure for a proxy upstream configuration.
+type Upstream struct {
+ DestinationType UpstreamDestType `json:",omitempty"`
+ DestinationNamespace string `json:",omitempty"`
+ DestinationName string
+ Datacenter string `json:",omitempty"`
+ LocalBindAddress string `json:",omitempty"`
+ LocalBindPort int `json:",omitempty"`
+ Config map[string]interface{} `json:",omitempty" bexpr:"-"`
+ MeshGateway MeshGatewayConfig `json:",omitempty"`
+}
+
+// Agent can be used to query the Agent endpoints
+type Agent struct {
+ c *Client
+
+ // cache the node name
+ nodeName string
+}
+
+// Agent returns a handle to the agent endpoints
+func (c *Client) Agent() *Agent {
+ return &Agent{c: c}
+}
+
+// Self is used to query the agent we are speaking to for
+// information about itself
+func (a *Agent) Self() (map[string]map[string]interface{}, error) {
+ r := a.c.newRequest("GET", "/v1/agent/self")
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var out map[string]map[string]interface{}
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Host is used to retrieve information about the host the
+// agent is running on such as CPU, memory, and disk. Requires
+// an operator:read ACL token.
+func (a *Agent) Host() (map[string]interface{}, error) {
+ r := a.c.newRequest("GET", "/v1/agent/host")
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var out map[string]interface{}
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Metrics is used to query the agent we are speaking to for
+// its current internal metric data
+func (a *Agent) Metrics() (*MetricsInfo, error) {
+ r := a.c.newRequest("GET", "/v1/agent/metrics")
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var out *MetricsInfo
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Reload triggers a configuration reload for the agent we are connected to.
+func (a *Agent) Reload() error {
+ r := a.c.newRequest("PUT", "/v1/agent/reload")
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// NodeName is used to get the node name of the agent
+func (a *Agent) NodeName() (string, error) {
+ if a.nodeName != "" {
+ return a.nodeName, nil
+ }
+ info, err := a.Self()
+ if err != nil {
+ return "", err
+ }
+ name := info["Config"]["NodeName"].(string)
+ a.nodeName = name
+ return name, nil
+}
+
+// Checks returns the locally registered checks
+func (a *Agent) Checks() (map[string]*AgentCheck, error) {
+ return a.ChecksWithFilter("")
+}
+
+// ChecksWithFilter returns a subset of the locally registered checks that match
+// the given filter expression
+func (a *Agent) ChecksWithFilter(filter string) (map[string]*AgentCheck, error) {
+ r := a.c.newRequest("GET", "/v1/agent/checks")
+ r.filterQuery(filter)
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var out map[string]*AgentCheck
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Services returns the locally registered services
+func (a *Agent) Services() (map[string]*AgentService, error) {
+ return a.ServicesWithFilter("")
+}
+
+// ServicesWithFilter returns a subset of the locally registered services that match
+// the given filter expression
+func (a *Agent) ServicesWithFilter(filter string) (map[string]*AgentService, error) {
+ r := a.c.newRequest("GET", "/v1/agent/services")
+ r.filterQuery(filter)
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var out map[string]*AgentService
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+
+ return out, nil
+}
+
+// AgentHealthServiceByID returns, for a given serviceID, the aggregated health status and the service definition, or an error if any
+// - If the service is not found, will return status (critical, nil, nil)
+// - If the service is found, will return ((critical|passing|warning), AgentServiceChecksInfo, nil)
+// - In all other cases, will return an error
+func (a *Agent) AgentHealthServiceByID(serviceID string) (string, *AgentServiceChecksInfo, error) {
+ path := fmt.Sprintf("/v1/agent/health/service/id/%v", url.PathEscape(serviceID))
+ r := a.c.newRequest("GET", path)
+ r.params.Add("format", "json")
+ r.header.Set("Accept", "application/json")
+ _, resp, err := a.c.doRequest(r)
+ if err != nil {
+ return "", nil, err
+ }
+ defer resp.Body.Close()
+ // Service not Found
+ if resp.StatusCode == http.StatusNotFound {
+ return HealthCritical, nil, nil
+ }
+ var out *AgentServiceChecksInfo
+ if err := decodeBody(resp, &out); err != nil {
+ return HealthCritical, out, err
+ }
+ switch resp.StatusCode {
+ case http.StatusOK:
+ return HealthPassing, out, nil
+ case http.StatusTooManyRequests:
+ return HealthWarning, out, nil
+ case http.StatusServiceUnavailable:
+ return HealthCritical, out, nil
+ }
+ return HealthCritical, out, fmt.Errorf("Unexpected Error Code %v for %s", resp.StatusCode, path)
+}
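+
+// Illustrative sketch (not part of the upstream source): the aggregated status
+// is derived from the HTTP status code, so a simple readiness probe reduces to
+// a string comparison. "web1" is a placeholder service ID.
+//
+//	status, _, err := client.Agent().AgentHealthServiceByID("web1")
+//	ready := err == nil && status == HealthPassing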
+
+// AgentHealthServiceByName returns, for a given service name, the aggregated health status for all services
+// having the specified name.
+// - If no service is found, will return status (critical, [], nil)
+// - If the service is found, will return ((critical|passing|warning), []api.AgentServiceChecksInfo, nil)
+// - In all other cases, will return an error
+func (a *Agent) AgentHealthServiceByName(service string) (string, []AgentServiceChecksInfo, error) {
+ path := fmt.Sprintf("/v1/agent/health/service/name/%v", url.PathEscape(service))
+ r := a.c.newRequest("GET", path)
+ r.params.Add("format", "json")
+ r.header.Set("Accept", "application/json")
+ _, resp, err := a.c.doRequest(r)
+ if err != nil {
+ return "", nil, err
+ }
+ defer resp.Body.Close()
+ // Service not Found
+ if resp.StatusCode == http.StatusNotFound {
+ return HealthCritical, nil, nil
+ }
+ var out []AgentServiceChecksInfo
+ if err := decodeBody(resp, &out); err != nil {
+ return HealthCritical, out, err
+ }
+ switch resp.StatusCode {
+ case http.StatusOK:
+ return HealthPassing, out, nil
+ case http.StatusTooManyRequests:
+ return HealthWarning, out, nil
+ case http.StatusServiceUnavailable:
+ return HealthCritical, out, nil
+ }
+ return HealthCritical, out, fmt.Errorf("Unexpected Error Code %v for %s", resp.StatusCode, path)
+}
+
+// Service returns a locally registered service instance and allows for
+// hash-based blocking.
+//
+// Note that this uses an unconventional blocking mechanism since it's
+// agent-local state. That means there is no persistent raft index so we block
+// based on object hash instead.
+func (a *Agent) Service(serviceID string, q *QueryOptions) (*AgentService, *QueryMeta, error) {
+ r := a.c.newRequest("GET", "/v1/agent/service/"+serviceID)
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out *AgentService
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+
+ return out, qm, nil
+}
+
+// Members returns the known gossip members. The WAN
+// flag can be used to query a server for WAN members.
+func (a *Agent) Members(wan bool) ([]*AgentMember, error) {
+ r := a.c.newRequest("GET", "/v1/agent/members")
+ if wan {
+ r.params.Set("wan", "1")
+ }
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var out []*AgentMember
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// MembersOpts returns the known gossip members and can be passed
+// additional options for WAN/segment filtering.
+func (a *Agent) MembersOpts(opts MembersOpts) ([]*AgentMember, error) {
+ r := a.c.newRequest("GET", "/v1/agent/members")
+ r.params.Set("segment", opts.Segment)
+ if opts.WAN {
+ r.params.Set("wan", "1")
+ }
+
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var out []*AgentMember
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
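+
+// Illustrative sketch (not part of the upstream source): listing LAN members
+// across every network segment using the AllSegments sentinel declared above.
+//
+//	members, err := client.Agent().MembersOpts(MembersOpts{Segment: AllSegments})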
+
+// ServiceRegister is used to register a new service with
+// the local agent
+func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error {
+ opts := ServiceRegisterOpts{
+ ReplaceExistingChecks: false,
+ }
+
+ return a.serviceRegister(service, opts)
+}
+
+// ServiceRegisterOpts is used to register a new service with
+// the local agent and can be passed additional options.
+func (a *Agent) ServiceRegisterOpts(service *AgentServiceRegistration, opts ServiceRegisterOpts) error {
+ return a.serviceRegister(service, opts)
+}
+
+func (a *Agent) serviceRegister(service *AgentServiceRegistration, opts ServiceRegisterOpts) error {
+ r := a.c.newRequest("PUT", "/v1/agent/service/register")
+ r.obj = service
+ if opts.ReplaceExistingChecks {
+ r.params.Set("replace-existing-checks", "true")
+ }
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
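+
+// Illustrative sketch (not part of the upstream source): registering a service
+// with an inline check; passing ReplaceExistingChecks prunes checks that are
+// no longer part of the definition. Names, ports, and URLs are placeholders.
+//
+//	err := client.Agent().ServiceRegisterOpts(&AgentServiceRegistration{
+//		Name:  "web",
+//		ID:    "web1",
+//		Port:  8000,
+//		Check: &AgentServiceCheck{HTTP: "http://127.0.0.1:8000/health", Interval: "10s"},
+//	}, ServiceRegisterOpts{ReplaceExistingChecks: true})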
+
+// ServiceDeregister is used to deregister a service with
+// the local agent
+func (a *Agent) ServiceDeregister(serviceID string) error {
+ r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID)
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// PassTTL is used to set a TTL check to the passing state.
+//
+// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
+// The client interface will be removed in 0.8 or changed to use
+// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9.
+func (a *Agent) PassTTL(checkID, note string) error {
+ return a.updateTTL(checkID, note, "pass")
+}
+
+// WarnTTL is used to set a TTL check to the warning state.
+//
+// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
+// The client interface will be removed in 0.8 or changed to use
+// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9.
+func (a *Agent) WarnTTL(checkID, note string) error {
+ return a.updateTTL(checkID, note, "warn")
+}
+
+// FailTTL is used to set a TTL check to the failing state.
+//
+// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
+// The client interface will be removed in 0.8 or changed to use
+// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9.
+func (a *Agent) FailTTL(checkID, note string) error {
+ return a.updateTTL(checkID, note, "fail")
+}
+
+// updateTTL is used to update the TTL of a check. This is the internal
+// method that uses the old API that's present in Consul versions prior to
+// 0.6.4. Since Consul didn't have an analogous "update" API before it seemed
+// ok to break this (former) UpdateTTL in favor of the new UpdateTTL below,
+// but keep the old Pass/Warn/Fail methods using the old API under the hood.
+//
+// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
+// The client interface will be removed in 0.8 and the server endpoints will
+// be removed in 0.9.
+func (a *Agent) updateTTL(checkID, note, status string) error {
+ switch status {
+ case "pass":
+ case "warn":
+ case "fail":
+ default:
+ return fmt.Errorf("Invalid status: %s", status)
+ }
+ endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID)
+ r := a.c.newRequest("PUT", endpoint)
+ r.params.Set("note", note)
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// checkUpdate is the payload for a PUT for a check update.
+type checkUpdate struct {
+ // Status is one of the api.Health* states: HealthPassing
+ // ("passing"), HealthWarning ("warning"), or HealthCritical
+ // ("critical").
+ Status string
+
+ // Output is the information to post to the UI for operators as the
+ // output of the process that decided to hit the TTL check. This is
+ // different from the note field that's associated with the check
+ // itself.
+ Output string
+}
+
+// UpdateTTL is used to update the TTL of a check. This uses the newer API
+// that was introduced in Consul 0.6.4 and later. We translate the old status
+// strings for compatibility (though a newer version of Consul will still be
+// required to use this API).
+func (a *Agent) UpdateTTL(checkID, output, status string) error {
+ switch status {
+ case "pass", HealthPassing:
+ status = HealthPassing
+ case "warn", HealthWarning:
+ status = HealthWarning
+ case "fail", HealthCritical:
+ status = HealthCritical
+ default:
+ return fmt.Errorf("Invalid status: %s", status)
+ }
+
+ endpoint := fmt.Sprintf("/v1/agent/check/update/%s", checkID)
+ r := a.c.newRequest("PUT", endpoint)
+ r.obj = &checkUpdate{
+ Status: status,
+ Output: output,
+ }
+
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
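+
+// Reporting sketch: a process that owns a TTL check can push its state through
+// UpdateTTL. The check ID and output string below are assumptions for
+// illustration only.
+//
+//	if err := agent.UpdateTTL("service:web", "healthy", HealthPassing); err != nil {
+//		log.Printf("ttl update failed: %v", err)
+//	}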
+
+// CheckRegister is used to register a new check with
+// the local agent
+func (a *Agent) CheckRegister(check *AgentCheckRegistration) error {
+ r := a.c.newRequest("PUT", "/v1/agent/check/register")
+ r.obj = check
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// CheckDeregister is used to deregister a check with
+// the local agent
+func (a *Agent) CheckDeregister(checkID string) error {
+ r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID)
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// Join is used to instruct the agent to attempt a join to
+// another cluster member
+func (a *Agent) Join(addr string, wan bool) error {
+ r := a.c.newRequest("PUT", "/v1/agent/join/"+addr)
+ if wan {
+ r.params.Set("wan", "1")
+ }
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// Leave is used to have the agent gracefully leave the cluster and shutdown
+func (a *Agent) Leave() error {
+ r := a.c.newRequest("PUT", "/v1/agent/leave")
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// ForceLeave is used to have the agent eject a failed node
+func (a *Agent) ForceLeave(node string) error {
+ r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node)
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// ForceLeavePrune is used to have a failed agent removed
+// from the list of members
+func (a *Agent) ForceLeavePrune(node string) error {
+ r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node)
+ r.params.Set("prune", "1")
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// ConnectAuthorize is used to authorize an incoming connection
+// to a natively integrated Connect service.
+func (a *Agent) ConnectAuthorize(auth *AgentAuthorizeParams) (*AgentAuthorize, error) {
+ r := a.c.newRequest("POST", "/v1/agent/connect/authorize")
+ r.obj = auth
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var out AgentAuthorize
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+// ConnectCARoots returns the list of roots.
+func (a *Agent) ConnectCARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) {
+ r := a.c.newRequest("GET", "/v1/agent/connect/ca/roots")
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out CARootList
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return &out, qm, nil
+}
+
+// ConnectCALeaf gets the leaf certificate for the given service ID.
+func (a *Agent) ConnectCALeaf(serviceID string, q *QueryOptions) (*LeafCert, *QueryMeta, error) {
+ r := a.c.newRequest("GET", "/v1/agent/connect/ca/leaf/"+serviceID)
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out LeafCert
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return &out, qm, nil
+}
+
+// EnableServiceMaintenance toggles service maintenance mode on
+// for the given service ID.
+func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error {
+ r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID)
+ r.params.Set("enable", "true")
+ r.params.Set("reason", reason)
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// DisableServiceMaintenance toggles service maintenance mode off
+// for the given service ID.
+func (a *Agent) DisableServiceMaintenance(serviceID string) error {
+ r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID)
+ r.params.Set("enable", "false")
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// EnableNodeMaintenance toggles node maintenance mode on for the
+// agent we are connected to.
+func (a *Agent) EnableNodeMaintenance(reason string) error {
+ r := a.c.newRequest("PUT", "/v1/agent/maintenance")
+ r.params.Set("enable", "true")
+ r.params.Set("reason", reason)
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// DisableNodeMaintenance toggles node maintenance mode off for the
+// agent we are connected to.
+func (a *Agent) DisableNodeMaintenance() error {
+ r := a.c.newRequest("PUT", "/v1/agent/maintenance")
+ r.params.Set("enable", "false")
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// Monitor returns a channel which will receive streaming logs from the agent.
+// Providing a non-nil stopCh can be used to close the connection and stop the
+// log stream. An empty string will be sent down the given channel when there's
+// nothing left to stream, after which the caller should close the stopCh.
+func (a *Agent) Monitor(loglevel string, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) {
+ return a.monitor(loglevel, false, stopCh, q)
+}
+
+// MonitorJSON is like Monitor except it returns logs in JSON format.
+func (a *Agent) MonitorJSON(loglevel string, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) {
+ return a.monitor(loglevel, true, stopCh, q)
+}
+
+func (a *Agent) monitor(loglevel string, logJSON bool, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) {
+ r := a.c.newRequest("GET", "/v1/agent/monitor")
+ r.setQueryOptions(q)
+ if loglevel != "" {
+ r.params.Add("loglevel", loglevel)
+ }
+ if logJSON {
+ r.params.Set("logjson", "true")
+ }
+ _, resp, err := requireOK(a.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ logCh := make(chan string, 64)
+ go func() {
+ defer resp.Body.Close()
+ scanner := bufio.NewScanner(resp.Body)
+ for {
+ select {
+ case <-stopCh:
+ close(logCh)
+ return
+ default:
+ }
+ if scanner.Scan() {
+ // An empty string signals to the caller that
+ // the scan is done, so make sure we only emit
+ // that when the scanner says it's done, not if
+ // we happen to ingest an empty line.
+ if text := scanner.Text(); text != "" {
+ logCh <- text
+ } else {
+ logCh <- " "
+ }
+ } else {
+ logCh <- ""
+ }
+ }
+ }()
+ return logCh, nil
+}
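+
+// Consumption sketch following the contract documented on Monitor: read until
+// the empty-string sentinel, then close stopCh so the goroutine can exit.
+//
+//	stopCh := make(chan struct{})
+//	logCh, err := agent.Monitor("info", stopCh, nil)
+//	if err != nil {
+//		return err
+//	}
+//	for line := range logCh {
+//		if line == "" { // nothing left to stream
+//			close(stopCh)
+//			continue
+//		}
+//		fmt.Println(line)
+//	}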
+
+// UpdateACLToken updates the agent's "acl_token". See updateToken for more
+// details.
+//
+// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateDefaultACLToken for v1.4.3 and above
+func (a *Agent) UpdateACLToken(token string, q *WriteOptions) (*WriteMeta, error) {
+ return a.updateToken("acl_token", token, q)
+}
+
+// UpdateACLAgentToken updates the agent's "acl_agent_token". See updateToken
+// for more details.
+//
+// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateAgentACLToken for v1.4.3 and above
+func (a *Agent) UpdateACLAgentToken(token string, q *WriteOptions) (*WriteMeta, error) {
+ return a.updateToken("acl_agent_token", token, q)
+}
+
+// UpdateACLAgentMasterToken updates the agent's "acl_agent_master_token". See
+// updateToken for more details.
+//
+// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateAgentMasterACLToken for v1.4.3 and above
+func (a *Agent) UpdateACLAgentMasterToken(token string, q *WriteOptions) (*WriteMeta, error) {
+ return a.updateToken("acl_agent_master_token", token, q)
+}
+
+// UpdateACLReplicationToken updates the agent's "acl_replication_token". See
+// updateToken for more details.
+//
+// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateReplicationACLToken for v1.4.3 and above
+func (a *Agent) UpdateACLReplicationToken(token string, q *WriteOptions) (*WriteMeta, error) {
+ return a.updateToken("acl_replication_token", token, q)
+}
+
+// UpdateDefaultACLToken updates the agent's "default" token. See updateToken
+// for more details
+func (a *Agent) UpdateDefaultACLToken(token string, q *WriteOptions) (*WriteMeta, error) {
+ return a.updateTokenFallback("default", "acl_token", token, q)
+}
+
+// UpdateAgentACLToken updates the agent's "agent" token. See updateToken
+// for more details
+func (a *Agent) UpdateAgentACLToken(token string, q *WriteOptions) (*WriteMeta, error) {
+ return a.updateTokenFallback("agent", "acl_agent_token", token, q)
+}
+
+// UpdateAgentMasterACLToken updates the agent's "agent_master" token. See updateToken
+// for more details
+func (a *Agent) UpdateAgentMasterACLToken(token string, q *WriteOptions) (*WriteMeta, error) {
+ return a.updateTokenFallback("agent_master", "acl_agent_master_token", token, q)
+}
+
+// UpdateReplicationACLToken updates the agent's "replication" token. See updateToken
+// for more details
+func (a *Agent) UpdateReplicationACLToken(token string, q *WriteOptions) (*WriteMeta, error) {
+ return a.updateTokenFallback("replication", "acl_replication_token", token, q)
+}
+
+// updateToken can be used to update one of an agent's ACL tokens after the
+// agent has started. The tokens may not be persisted, so they will need to be
+// updated again if the agent is restarted, unless the agent is configured to
+// persist them.
+func (a *Agent) updateToken(target, token string, q *WriteOptions) (*WriteMeta, error) {
+ meta, _, err := a.updateTokenOnce(target, token, q)
+ return meta, err
+}
+
+func (a *Agent) updateTokenFallback(target, fallback, token string, q *WriteOptions) (*WriteMeta, error) {
+ meta, status, err := a.updateTokenOnce(target, token, q)
+ if err != nil && status == 404 {
+ meta, _, err = a.updateTokenOnce(fallback, token, q)
+ }
+ return meta, err
+}
+
+func (a *Agent) updateTokenOnce(target, token string, q *WriteOptions) (*WriteMeta, int, error) {
+ r := a.c.newRequest("PUT", fmt.Sprintf("/v1/agent/token/%s", target))
+ r.setWriteOptions(q)
+ r.obj = &AgentToken{Token: token}
+
+ rtt, resp, err := a.c.doRequest(r)
+ if err != nil {
+ return nil, 0, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+
+ if resp.StatusCode != 200 {
+ var buf bytes.Buffer
+ io.Copy(&buf, resp.Body)
+ return wm, resp.StatusCode, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
+ }
+
+ return wm, resp.StatusCode, nil
+}
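+
+// Fallback sketch: on Consul 1.4.3 and above the "default" target succeeds
+// directly; on older agents it returns 404 and the call retries the legacy
+// "acl_token" target. The token value is an assumption for illustration.
+//
+//	if _, err := agent.UpdateDefaultACLToken("b1gs33cr3t", nil); err != nil {
+//		log.Printf("token update failed: %v", err)
+//	}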
diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go
new file mode 100644
index 0000000..38a4e98
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/api.go
@@ -0,0 +1,1043 @@
+package api
+
+import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/go-cleanhttp"
+ "github.com/hashicorp/go-hclog"
+ "github.com/hashicorp/go-rootcerts"
+)
+
+const (
+ // HTTPAddrEnvName defines an environment variable name which sets
+ // the HTTP address if there is no -http-addr specified.
+ HTTPAddrEnvName = "CONSUL_HTTP_ADDR"
+
+ // HTTPTokenEnvName defines an environment variable name which sets
+ // the HTTP token.
+ HTTPTokenEnvName = "CONSUL_HTTP_TOKEN"
+
+ // HTTPTokenFileEnvName defines an environment variable name which sets
+ // the HTTP token file.
+ HTTPTokenFileEnvName = "CONSUL_HTTP_TOKEN_FILE"
+
+ // HTTPAuthEnvName defines an environment variable name which sets
+ // the HTTP authentication header.
+ HTTPAuthEnvName = "CONSUL_HTTP_AUTH"
+
+ // HTTPSSLEnvName defines an environment variable name which sets
+ // whether or not to use HTTPS.
+ HTTPSSLEnvName = "CONSUL_HTTP_SSL"
+
+ // HTTPCAFile defines an environment variable name which sets the
+ // CA file to use for talking to Consul over TLS.
+ HTTPCAFile = "CONSUL_CACERT"
+
+ // HTTPCAPath defines an environment variable name which sets the
+ // path to a directory of CA certs to use for talking to Consul over TLS.
+ HTTPCAPath = "CONSUL_CAPATH"
+
+ // HTTPClientCert defines an environment variable name which sets the
+ // client cert file to use for talking to Consul over TLS.
+ HTTPClientCert = "CONSUL_CLIENT_CERT"
+
+ // HTTPClientKey defines an environment variable name which sets the
+ // client key file to use for talking to Consul over TLS.
+ HTTPClientKey = "CONSUL_CLIENT_KEY"
+
+ // HTTPTLSServerName defines an environment variable name which sets the
+ // server name to use as the SNI host when connecting via TLS
+ HTTPTLSServerName = "CONSUL_TLS_SERVER_NAME"
+
+ // HTTPSSLVerifyEnvName defines an environment variable name which sets
+ // whether or not to disable certificate checking.
+ HTTPSSLVerifyEnvName = "CONSUL_HTTP_SSL_VERIFY"
+
+ // GRPCAddrEnvName defines an environment variable name which sets the gRPC
+ // address for consul connect envoy. Note this isn't actually used by the api
+ // client in this package but is defined here for consistency with all the
+ // other ENV names we use.
+ GRPCAddrEnvName = "CONSUL_GRPC_ADDR"
+
+	// HTTPNamespaceEnvName defines an environment variable name which sets
+ // the HTTP Namespace to be used by default. This can still be overridden.
+ HTTPNamespaceEnvName = "CONSUL_NAMESPACE"
+)
+
+// QueryOptions are used to parameterize a query
+type QueryOptions struct {
+ // Namespace overrides the `default` namespace
+ // Note: Namespaces are available only in Consul Enterprise
+ Namespace string
+
+ // Providing a datacenter overwrites the DC provided
+ // by the Config
+ Datacenter string
+
+ // AllowStale allows any Consul server (non-leader) to service
+ // a read. This allows for lower latency and higher throughput
+ AllowStale bool
+
+ // RequireConsistent forces the read to be fully consistent.
+ // This is more expensive but prevents ever performing a stale
+ // read.
+ RequireConsistent bool
+
+ // UseCache requests that the agent cache results locally. See
+ // https://www.consul.io/api/features/caching.html for more details on the
+ // semantics.
+ UseCache bool
+
+ // MaxAge limits how old a cached value will be returned if UseCache is true.
+ // If there is a cached response that is older than the MaxAge, it is treated
+ // as a cache miss and a new fetch invoked. If the fetch fails, the error is
+ // returned. Clients that wish to allow for stale results on error can set
+ // StaleIfError to a longer duration to change this behavior. It is ignored
+ // if the endpoint supports background refresh caching. See
+ // https://www.consul.io/api/features/caching.html for more details.
+ MaxAge time.Duration
+
+ // StaleIfError specifies how stale the client will accept a cached response
+ // if the servers are unavailable to fetch a fresh one. Only makes sense when
+ // UseCache is true and MaxAge is set to a lower, non-zero value. It is
+ // ignored if the endpoint supports background refresh caching. See
+ // https://www.consul.io/api/features/caching.html for more details.
+ StaleIfError time.Duration
+
+ // WaitIndex is used to enable a blocking query. Waits
+ // until the timeout or the next index is reached
+ WaitIndex uint64
+
+ // WaitHash is used by some endpoints instead of WaitIndex to perform blocking
+ // on state based on a hash of the response rather than a monotonic index.
+ // This is required when the state being blocked on is not stored in Raft, for
+ // example agent-local proxy configuration.
+ WaitHash string
+
+ // WaitTime is used to bound the duration of a wait.
+ // Defaults to that of the Config, but can be overridden.
+ WaitTime time.Duration
+
+ // Token is used to provide a per-request ACL token
+ // which overrides the agent's default token.
+ Token string
+
+ // Near is used to provide a node name that will sort the results
+ // in ascending order based on the estimated round trip time from
+ // that node. Setting this to "_agent" will use the agent's node
+ // for the sort.
+ Near string
+
+ // NodeMeta is used to filter results by nodes with the given
+ // metadata key/value pairs. Currently, only one key/value pair can
+ // be provided for filtering.
+ NodeMeta map[string]string
+
+ // RelayFactor is used in keyring operations to cause responses to be
+ // relayed back to the sender through N other random nodes. Must be
+ // a value from 0 to 5 (inclusive).
+ RelayFactor uint8
+
+ // LocalOnly is used in keyring list operation to force the keyring
+ // query to only hit local servers (no WAN traffic).
+ LocalOnly bool
+
+ // Connect filters prepared query execution to only include Connect-capable
+ // services. This currently affects prepared query execution.
+ Connect bool
+
+ // ctx is an optional context pass through to the underlying HTTP
+ // request layer. Use Context() and WithContext() to manage this.
+ ctx context.Context
+
+ // Filter requests filtering data prior to it being returned. The string
+ // is a go-bexpr compatible expression.
+ Filter string
+}
+
+func (o *QueryOptions) Context() context.Context {
+ if o != nil && o.ctx != nil {
+ return o.ctx
+ }
+ return context.Background()
+}
+
+func (o *QueryOptions) WithContext(ctx context.Context) *QueryOptions {
+ o2 := new(QueryOptions)
+ if o != nil {
+ *o2 = *o
+ }
+ o2.ctx = ctx
+ return o2
+}
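+
+// Cancellation sketch: attaching a context lets a caller abort a long blocking
+// query early. WithContext copies the options, so the receiver is unchanged.
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//	defer cancel()
+//	q := (&QueryOptions{}).WithContext(ctx)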
+
+// WriteOptions are used to parameterize a write
+type WriteOptions struct {
+ // Namespace overrides the `default` namespace
+ // Note: Namespaces are available only in Consul Enterprise
+ Namespace string
+
+ // Providing a datacenter overwrites the DC provided
+ // by the Config
+ Datacenter string
+
+ // Token is used to provide a per-request ACL token
+ // which overrides the agent's default token.
+ Token string
+
+ // RelayFactor is used in keyring operations to cause responses to be
+ // relayed back to the sender through N other random nodes. Must be
+ // a value from 0 to 5 (inclusive).
+ RelayFactor uint8
+
+ // ctx is an optional context pass through to the underlying HTTP
+ // request layer. Use Context() and WithContext() to manage this.
+ ctx context.Context
+}
+
+func (o *WriteOptions) Context() context.Context {
+ if o != nil && o.ctx != nil {
+ return o.ctx
+ }
+ return context.Background()
+}
+
+func (o *WriteOptions) WithContext(ctx context.Context) *WriteOptions {
+ o2 := new(WriteOptions)
+ if o != nil {
+ *o2 = *o
+ }
+ o2.ctx = ctx
+ return o2
+}
+
+// QueryMeta is used to return meta data about a query
+type QueryMeta struct {
+ // LastIndex. This can be used as a WaitIndex to perform
+ // a blocking query
+ LastIndex uint64
+
+ // LastContentHash. This can be used as a WaitHash to perform a blocking query
+ // for endpoints that support hash-based blocking. Endpoints that do not
+ // support it will return an empty hash.
+ LastContentHash string
+
+ // Time of last contact from the leader for the
+ // server servicing the request
+ LastContact time.Duration
+
+ // Is there a known leader
+ KnownLeader bool
+
+ // How long did the request take
+ RequestTime time.Duration
+
+ // Is address translation enabled for HTTP responses on this agent
+ AddressTranslationEnabled bool
+
+ // CacheHit is true if the result was served from agent-local cache.
+ CacheHit bool
+
+ // CacheAge is set if request was ?cached and indicates how stale the cached
+ // response is.
+ CacheAge time.Duration
+}
+
+// WriteMeta is used to return meta data about a write
+type WriteMeta struct {
+ // How long did the request take
+ RequestTime time.Duration
+}
+
+// HttpBasicAuth is used to authenticate http client with HTTP Basic Authentication
+type HttpBasicAuth struct {
+ // Username to use for HTTP Basic Authentication
+ Username string
+
+ // Password to use for HTTP Basic Authentication
+ Password string
+}
+
+// Config is used to configure the creation of a client
+type Config struct {
+ // Address is the address of the Consul server
+ Address string
+
+ // Scheme is the URI scheme for the Consul server
+ Scheme string
+
+ // Datacenter to use. If not provided, the default agent datacenter is used.
+ Datacenter string
+
+ // Transport is the Transport to use for the http client.
+ Transport *http.Transport
+
+ // HttpClient is the client to use. Default will be
+ // used if not provided.
+ HttpClient *http.Client
+
+ // HttpAuth is the auth info to use for http access.
+ HttpAuth *HttpBasicAuth
+
+ // WaitTime limits how long a Watch will block. If not provided,
+ // the agent default values will be used.
+ WaitTime time.Duration
+
+ // Token is used to provide a per-request ACL token
+ // which overrides the agent's default token.
+ Token string
+
+ // TokenFile is a file containing the current token to use for this client.
+ // If provided it is read once at startup and never again.
+ TokenFile string
+
+ // Namespace is the name of the namespace to send along for the request
+	// when no other Namespace is present in the QueryOptions.
+ Namespace string
+
+ TLSConfig TLSConfig
+}
+
+// TLSConfig is used to generate a TLSClientConfig that's useful for talking to
+// Consul using TLS.
+type TLSConfig struct {
+ // Address is the optional address of the Consul server. The port, if any
+ // will be removed from here and this will be set to the ServerName of the
+ // resulting config.
+ Address string
+
+ // CAFile is the optional path to the CA certificate used for Consul
+ // communication, defaults to the system bundle if not specified.
+ CAFile string
+
+ // CAPath is the optional path to a directory of CA certificates to use for
+ // Consul communication, defaults to the system bundle if not specified.
+ CAPath string
+
+ // CAPem is the optional PEM-encoded CA certificate used for Consul
+ // communication, defaults to the system bundle if not specified.
+ CAPem []byte
+
+ // CertFile is the optional path to the certificate for Consul
+ // communication. If this is set then you need to also set KeyFile.
+ CertFile string
+
+ // CertPEM is the optional PEM-encoded certificate for Consul
+ // communication. If this is set then you need to also set KeyPEM.
+ CertPEM []byte
+
+ // KeyFile is the optional path to the private key for Consul communication.
+ // If this is set then you need to also set CertFile.
+ KeyFile string
+
+ // KeyPEM is the optional PEM-encoded private key for Consul communication.
+ // If this is set then you need to also set CertPEM.
+ KeyPEM []byte
+
+ // InsecureSkipVerify if set to true will disable TLS host verification.
+ InsecureSkipVerify bool
+}
+
+// DefaultConfig returns a default configuration for the client. By default this
+// will pool and reuse idle connections to Consul. If you have a long-lived
+// client object, this is the desired behavior and should make the most efficient
+// use of the connections to Consul. If you don't reuse a client object, which
+// is not recommended, then you may notice idle connections building up over
+// time. To avoid this, use the DefaultNonPooledConfig() instead.
+func DefaultConfig() *Config {
+ return defaultConfig(nil, cleanhttp.DefaultPooledTransport)
+}
+
+// DefaultConfigWithLogger returns a default configuration for the client. It
+// is exactly the same as DefaultConfig, but allows for a pre-configured logger
+// object to be passed through.
+func DefaultConfigWithLogger(logger hclog.Logger) *Config {
+ return defaultConfig(logger, cleanhttp.DefaultPooledTransport)
+}
+
+// DefaultNonPooledConfig returns a default configuration for the client which
+// does not pool connections. This isn't a recommended configuration because it
+// will reconnect to Consul on every request, but this is useful to avoid the
+// accumulation of idle connections if you make many client objects during the
+// lifetime of your application.
+func DefaultNonPooledConfig() *Config {
+ return defaultConfig(nil, cleanhttp.DefaultTransport)
+}
+
+// defaultConfig returns the default configuration for the client, using the
+// given function to make the transport.
+func defaultConfig(logger hclog.Logger, transportFn func() *http.Transport) *Config {
+ if logger == nil {
+ logger = hclog.New(&hclog.LoggerOptions{
+ Name: "consul-api",
+ })
+ }
+
+ config := &Config{
+ Address: "127.0.0.1:8500",
+ Scheme: "http",
+ Transport: transportFn(),
+ }
+
+ if addr := os.Getenv(HTTPAddrEnvName); addr != "" {
+ config.Address = addr
+ }
+
+ if tokenFile := os.Getenv(HTTPTokenFileEnvName); tokenFile != "" {
+ config.TokenFile = tokenFile
+ }
+
+ if token := os.Getenv(HTTPTokenEnvName); token != "" {
+ config.Token = token
+ }
+
+ if auth := os.Getenv(HTTPAuthEnvName); auth != "" {
+ var username, password string
+ if strings.Contains(auth, ":") {
+ split := strings.SplitN(auth, ":", 2)
+ username = split[0]
+ password = split[1]
+ } else {
+ username = auth
+ }
+
+ config.HttpAuth = &HttpBasicAuth{
+ Username: username,
+ Password: password,
+ }
+ }
+
+ if ssl := os.Getenv(HTTPSSLEnvName); ssl != "" {
+ enabled, err := strconv.ParseBool(ssl)
+ if err != nil {
+ logger.Warn(fmt.Sprintf("could not parse %s", HTTPSSLEnvName), "error", err)
+ }
+
+ if enabled {
+ config.Scheme = "https"
+ }
+ }
+
+ if v := os.Getenv(HTTPTLSServerName); v != "" {
+ config.TLSConfig.Address = v
+ }
+ if v := os.Getenv(HTTPCAFile); v != "" {
+ config.TLSConfig.CAFile = v
+ }
+ if v := os.Getenv(HTTPCAPath); v != "" {
+ config.TLSConfig.CAPath = v
+ }
+ if v := os.Getenv(HTTPClientCert); v != "" {
+ config.TLSConfig.CertFile = v
+ }
+ if v := os.Getenv(HTTPClientKey); v != "" {
+ config.TLSConfig.KeyFile = v
+ }
+ if v := os.Getenv(HTTPSSLVerifyEnvName); v != "" {
+ doVerify, err := strconv.ParseBool(v)
+ if err != nil {
+ logger.Warn(fmt.Sprintf("could not parse %s", HTTPSSLVerifyEnvName), "error", err)
+ }
+ if !doVerify {
+ config.TLSConfig.InsecureSkipVerify = true
+ }
+ }
+
+ if v := os.Getenv(HTTPNamespaceEnvName); v != "" {
+ config.Namespace = v
+ }
+
+ return config
+}
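+
+// Environment sketch: with variables like these exported, DefaultConfig picks
+// them up without any code changes (values are illustrative):
+//
+//	CONSUL_HTTP_ADDR=10.0.0.5:8501
+//	CONSUL_HTTP_SSL=true
+//	CONSUL_CACERT=/etc/consul/ca.pem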
+
+// SetupTLSConfig is used to generate a TLSClientConfig that's useful for
+// talking to Consul using TLS.
+func SetupTLSConfig(tlsConfig *TLSConfig) (*tls.Config, error) {
+ tlsClientConfig := &tls.Config{
+ InsecureSkipVerify: tlsConfig.InsecureSkipVerify,
+ }
+
+ if tlsConfig.Address != "" {
+ server := tlsConfig.Address
+ hasPort := strings.LastIndex(server, ":") > strings.LastIndex(server, "]")
+ if hasPort {
+ var err error
+ server, _, err = net.SplitHostPort(server)
+ if err != nil {
+ return nil, err
+ }
+ }
+ tlsClientConfig.ServerName = server
+ }
+
+ if len(tlsConfig.CertPEM) != 0 && len(tlsConfig.KeyPEM) != 0 {
+ tlsCert, err := tls.X509KeyPair(tlsConfig.CertPEM, tlsConfig.KeyPEM)
+ if err != nil {
+ return nil, err
+ }
+ tlsClientConfig.Certificates = []tls.Certificate{tlsCert}
+ } else if len(tlsConfig.CertPEM) != 0 || len(tlsConfig.KeyPEM) != 0 {
+ return nil, fmt.Errorf("both client cert and client key must be provided")
+ }
+
+ if tlsConfig.CertFile != "" && tlsConfig.KeyFile != "" {
+ tlsCert, err := tls.LoadX509KeyPair(tlsConfig.CertFile, tlsConfig.KeyFile)
+ if err != nil {
+ return nil, err
+ }
+ tlsClientConfig.Certificates = []tls.Certificate{tlsCert}
+ } else if tlsConfig.CertFile != "" || tlsConfig.KeyFile != "" {
+ return nil, fmt.Errorf("both client cert and client key must be provided")
+ }
+
+ if tlsConfig.CAFile != "" || tlsConfig.CAPath != "" || len(tlsConfig.CAPem) != 0 {
+ rootConfig := &rootcerts.Config{
+ CAFile: tlsConfig.CAFile,
+ CAPath: tlsConfig.CAPath,
+ CACertificate: tlsConfig.CAPem,
+ }
+ if err := rootcerts.ConfigureTLS(tlsClientConfig, rootConfig); err != nil {
+ return nil, err
+ }
+ }
+
+ return tlsClientConfig, nil
+}
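+
+// Usage sketch (the address and CA path are assumptions): callers that manage
+// their own transport can build the *tls.Config directly instead of relying
+// on NewHttpClient to do it.
+//
+//	tlsCfg, err := SetupTLSConfig(&TLSConfig{
+//		Address: "consul.example.com:8501",
+//		CAFile:  "/etc/consul/ca.pem",
+//	})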
+
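+// GenerateEnv returns the environment variable assignments that reproduce
+// this Config, e.g. for passing to a child process's environment.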
+func (c *Config) GenerateEnv() []string {
+ env := make([]string, 0, 10)
+
+ env = append(env,
+ fmt.Sprintf("%s=%s", HTTPAddrEnvName, c.Address),
+ fmt.Sprintf("%s=%s", HTTPTokenEnvName, c.Token),
+ fmt.Sprintf("%s=%s", HTTPTokenFileEnvName, c.TokenFile),
+ fmt.Sprintf("%s=%t", HTTPSSLEnvName, c.Scheme == "https"),
+ fmt.Sprintf("%s=%s", HTTPCAFile, c.TLSConfig.CAFile),
+ fmt.Sprintf("%s=%s", HTTPCAPath, c.TLSConfig.CAPath),
+ fmt.Sprintf("%s=%s", HTTPClientCert, c.TLSConfig.CertFile),
+ fmt.Sprintf("%s=%s", HTTPClientKey, c.TLSConfig.KeyFile),
+ fmt.Sprintf("%s=%s", HTTPTLSServerName, c.TLSConfig.Address),
+ fmt.Sprintf("%s=%t", HTTPSSLVerifyEnvName, !c.TLSConfig.InsecureSkipVerify))
+
+ if c.HttpAuth != nil {
+ env = append(env, fmt.Sprintf("%s=%s:%s", HTTPAuthEnvName, c.HttpAuth.Username, c.HttpAuth.Password))
+ } else {
+ env = append(env, fmt.Sprintf("%s=", HTTPAuthEnvName))
+ }
+
+ return env
+}
+
+// Client provides a client to the Consul API
+type Client struct {
+ config Config
+}
+
+// NewClient returns a new client
+func NewClient(config *Config) (*Client, error) {
+ // bootstrap the config
+ defConfig := DefaultConfig()
+
+ if config.Address == "" {
+ config.Address = defConfig.Address
+ }
+
+ if config.Scheme == "" {
+ config.Scheme = defConfig.Scheme
+ }
+
+ if config.Transport == nil {
+ config.Transport = defConfig.Transport
+ }
+
+ if config.TLSConfig.Address == "" {
+ config.TLSConfig.Address = defConfig.TLSConfig.Address
+ }
+
+ if config.TLSConfig.CAFile == "" {
+ config.TLSConfig.CAFile = defConfig.TLSConfig.CAFile
+ }
+
+ if config.TLSConfig.CAPath == "" {
+ config.TLSConfig.CAPath = defConfig.TLSConfig.CAPath
+ }
+
+ if config.TLSConfig.CertFile == "" {
+ config.TLSConfig.CertFile = defConfig.TLSConfig.CertFile
+ }
+
+ if config.TLSConfig.KeyFile == "" {
+ config.TLSConfig.KeyFile = defConfig.TLSConfig.KeyFile
+ }
+
+ if !config.TLSConfig.InsecureSkipVerify {
+ config.TLSConfig.InsecureSkipVerify = defConfig.TLSConfig.InsecureSkipVerify
+ }
+
+ if config.HttpClient == nil {
+ var err error
+ config.HttpClient, err = NewHttpClient(config.Transport, config.TLSConfig)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ parts := strings.SplitN(config.Address, "://", 2)
+ if len(parts) == 2 {
+ switch parts[0] {
+ case "http":
+ // Never revert to http if TLS was explicitly requested.
+ case "https":
+ config.Scheme = "https"
+ case "unix":
+ trans := cleanhttp.DefaultTransport()
+ trans.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) {
+ return net.Dial("unix", parts[1])
+ }
+ httpClient, err := NewHttpClient(trans, config.TLSConfig)
+ if err != nil {
+ return nil, err
+ }
+ config.HttpClient = httpClient
+ default:
+ return nil, fmt.Errorf("Unknown protocol scheme: %s", parts[0])
+ }
+ config.Address = parts[1]
+ }
+
+ // If the TokenFile is set, always use that, even if a Token is configured.
+ // This is because when TokenFile is set it is read into the Token field.
+ // We want any derived clients to have to re-read the token file.
+ if config.TokenFile != "" {
+ data, err := ioutil.ReadFile(config.TokenFile)
+ if err != nil {
+ return nil, fmt.Errorf("Error loading token file: %s", err)
+ }
+
+ if token := strings.TrimSpace(string(data)); token != "" {
+ config.Token = token
+ }
+ }
+ if config.Token == "" {
+ config.Token = defConfig.Token
+ }
+
+ return &Client{config: *config}, nil
+}
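+
+// Construction sketch (the address below is an assumption; DefaultConfig alone
+// is enough for a local agent):
+//
+//	cfg := DefaultConfig()
+//	cfg.Address = "10.0.0.5:8500"
+//	client, err := NewClient(cfg)
+//	if err != nil {
+//		return err
+//	}
+//	catalog := client.Catalog()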
+
+// NewHttpClient returns an http client configured with the given Transport and TLS
+// config.
+func NewHttpClient(transport *http.Transport, tlsConf TLSConfig) (*http.Client, error) {
+ client := &http.Client{
+ Transport: transport,
+ }
+
+ // TODO (slackpad) - Once we get some run time on the HTTP/2 support we
+ // should turn it on by default if TLS is enabled. We would basically
+ // just need to call http2.ConfigureTransport(transport) here. We also
+ // don't want to introduce another external dependency on
+ // golang.org/x/net/http2 at this time. For a complete recipe for how
+ // to enable HTTP/2 support on a transport suitable for the API client
+ // library see agent/http_test.go:TestHTTPServer_H2.
+
+ if transport.TLSClientConfig == nil {
+ tlsClientConfig, err := SetupTLSConfig(&tlsConf)
+
+ if err != nil {
+ return nil, err
+ }
+
+ transport.TLSClientConfig = tlsClientConfig
+ }
+
+ return client, nil
+}
+
+// request is used to help build up a request
+type request struct {
+ config *Config
+ method string
+ url *url.URL
+ params url.Values
+ body io.Reader
+ header http.Header
+ obj interface{}
+ ctx context.Context
+}
+
+// setQueryOptions is used to annotate the request with
+// additional query options
+func (r *request) setQueryOptions(q *QueryOptions) {
+ if q == nil {
+ return
+ }
+ if q.Namespace != "" {
+ r.params.Set("ns", q.Namespace)
+ }
+ if q.Datacenter != "" {
+ r.params.Set("dc", q.Datacenter)
+ }
+ if q.AllowStale {
+ r.params.Set("stale", "")
+ }
+ if q.RequireConsistent {
+ r.params.Set("consistent", "")
+ }
+ if q.WaitIndex != 0 {
+ r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10))
+ }
+ if q.WaitTime != 0 {
+ r.params.Set("wait", durToMsec(q.WaitTime))
+ }
+ if q.WaitHash != "" {
+ r.params.Set("hash", q.WaitHash)
+ }
+ if q.Token != "" {
+ r.header.Set("X-Consul-Token", q.Token)
+ }
+ if q.Near != "" {
+ r.params.Set("near", q.Near)
+ }
+ if q.Filter != "" {
+ r.params.Set("filter", q.Filter)
+ }
+ if len(q.NodeMeta) > 0 {
+ for key, value := range q.NodeMeta {
+ r.params.Add("node-meta", key+":"+value)
+ }
+ }
+ if q.RelayFactor != 0 {
+ r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor)))
+ }
+ if q.LocalOnly {
+ r.params.Set("local-only", fmt.Sprintf("%t", q.LocalOnly))
+ }
+ if q.Connect {
+ r.params.Set("connect", "true")
+ }
+ if q.UseCache && !q.RequireConsistent {
+ r.params.Set("cached", "")
+
+ cc := []string{}
+ if q.MaxAge > 0 {
+ cc = append(cc, fmt.Sprintf("max-age=%.0f", q.MaxAge.Seconds()))
+ }
+ if q.StaleIfError > 0 {
+ cc = append(cc, fmt.Sprintf("stale-if-error=%.0f", q.StaleIfError.Seconds()))
+ }
+ if len(cc) > 0 {
+ r.header.Set("Cache-Control", strings.Join(cc, ", "))
+ }
+ }
+
+ r.ctx = q.ctx
+}
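+
+// Blocking-query sketch: WaitIndex and WaitTime become the "index" and "wait"
+// parameters set above. A watch loop can feed them from a previous response's
+// QueryMeta (meta below is assumed to come from such a call):
+//
+//	q := &QueryOptions{WaitIndex: meta.LastIndex, WaitTime: 5 * time.Minute}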
+
+// durToMsec converts a duration to a millisecond specified string. If the
+// user selected a positive value that rounds to 0 ms, then we will use 1 ms
+// so they get a short delay, otherwise Consul will translate the 0 ms into
+// a huge default delay.
+func durToMsec(dur time.Duration) string {
+ ms := dur / time.Millisecond
+ if dur > 0 && ms == 0 {
+ ms = 1
+ }
+ return fmt.Sprintf("%dms", ms)
+}
+
+// serverError is a string we look for to detect 500 errors.
+const serverError = "Unexpected response code: 500"
+
+// IsRetryableError returns true for 500 errors from the Consul servers, and
+// network connection errors. These are usually retryable at a later time.
+// This applies to reads but NOT to writes. This may return true for errors
+// on writes that may have still gone through, so do not use this to retry
+// any write operations.
+func IsRetryableError(err error) bool {
+ if err == nil {
+ return false
+ }
+
+ if _, ok := err.(net.Error); ok {
+ return true
+ }
+
+ // TODO (slackpad) - Make a real error type here instead of using
+ // a string check.
+ return strings.Contains(err.Error(), serverError)
+}
+
+// setWriteOptions is used to annotate the request with
+// additional write options
+func (r *request) setWriteOptions(q *WriteOptions) {
+ if q == nil {
+ return
+ }
+ if q.Namespace != "" {
+ r.params.Set("ns", q.Namespace)
+ }
+ if q.Datacenter != "" {
+ r.params.Set("dc", q.Datacenter)
+ }
+ if q.Token != "" {
+ r.header.Set("X-Consul-Token", q.Token)
+ }
+ if q.RelayFactor != 0 {
+ r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor)))
+ }
+ r.ctx = q.ctx
+}
+
+// toHTTP converts the request to an HTTP request
+func (r *request) toHTTP() (*http.Request, error) {
+ // Encode the query parameters
+ r.url.RawQuery = r.params.Encode()
+
+ // Check if we should encode the body
+ if r.body == nil && r.obj != nil {
+ b, err := encodeBody(r.obj)
+ if err != nil {
+ return nil, err
+ }
+ r.body = b
+ }
+
+ // Create the HTTP request
+ req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body)
+ if err != nil {
+ return nil, err
+ }
+
+ req.URL.Host = r.url.Host
+ req.URL.Scheme = r.url.Scheme
+ req.Host = r.url.Host
+ req.Header = r.header
+
+ // Setup auth
+ if r.config.HttpAuth != nil {
+ req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password)
+ }
+ if r.ctx != nil {
+ return req.WithContext(r.ctx), nil
+ }
+
+ return req, nil
+}
+
+// newRequest is used to create a new request
+func (c *Client) newRequest(method, path string) *request {
+ r := &request{
+ config: &c.config,
+ method: method,
+ url: &url.URL{
+ Scheme: c.config.Scheme,
+ Host: c.config.Address,
+ Path: path,
+ },
+ params: make(map[string][]string),
+ header: make(http.Header),
+ }
+ if c.config.Datacenter != "" {
+ r.params.Set("dc", c.config.Datacenter)
+ }
+ if c.config.Namespace != "" {
+ r.params.Set("ns", c.config.Namespace)
+ }
+ if c.config.WaitTime != 0 {
+ r.params.Set("wait", durToMsec(r.config.WaitTime))
+ }
+ if c.config.Token != "" {
+ r.header.Set("X-Consul-Token", r.config.Token)
+ }
+ return r
+}
+
+// doRequest runs a request with our client
+func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) {
+ req, err := r.toHTTP()
+ if err != nil {
+ return 0, nil, err
+ }
+ start := time.Now()
+ resp, err := c.config.HttpClient.Do(req)
+ diff := time.Since(start)
+ return diff, resp, err
+}
+
+// query is used to do a GET request against an endpoint
+// and deserialize the response into an interface using
+// standard Consul conventions.
+func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
+ r := c.newRequest("GET", endpoint)
+ r.setQueryOptions(q)
+ rtt, resp, err := c.doRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ if err := decodeBody(resp, out); err != nil {
+ return nil, err
+ }
+ return qm, nil
+}
+
+// write is used to do a PUT request against an endpoint
+// and serialize/deserialized using the standard Consul conventions.
+func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) {
+ r := c.newRequest("PUT", endpoint)
+ r.setWriteOptions(q)
+ r.obj = in
+ rtt, resp, err := requireOK(c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ if out != nil {
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+ } else if _, err := ioutil.ReadAll(resp.Body); err != nil {
+ return nil, err
+ }
+ return wm, nil
+}
+
+// parseQueryMeta is used to help parse query meta-data
+//
+// TODO(rb): bug? the error from this function is never handled
+func parseQueryMeta(resp *http.Response, q *QueryMeta) error {
+ header := resp.Header
+
+ // Parse the X-Consul-Index (if it's set - hash based blocking queries don't
+ // set this)
+ if indexStr := header.Get("X-Consul-Index"); indexStr != "" {
+ index, err := strconv.ParseUint(indexStr, 10, 64)
+ if err != nil {
+ return fmt.Errorf("Failed to parse X-Consul-Index: %v", err)
+ }
+ q.LastIndex = index
+ }
+ q.LastContentHash = header.Get("X-Consul-ContentHash")
+
+ // Parse the X-Consul-LastContact
+ last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64)
+ if err != nil {
+ return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err)
+ }
+ q.LastContact = time.Duration(last) * time.Millisecond
+
+ // Parse the X-Consul-KnownLeader
+ switch header.Get("X-Consul-KnownLeader") {
+ case "true":
+ q.KnownLeader = true
+ default:
+ q.KnownLeader = false
+ }
+
+ // Parse X-Consul-Translate-Addresses
+ switch header.Get("X-Consul-Translate-Addresses") {
+ case "true":
+ q.AddressTranslationEnabled = true
+ default:
+ q.AddressTranslationEnabled = false
+ }
+
+ // Parse Cache info
+ if cacheStr := header.Get("X-Cache"); cacheStr != "" {
+ q.CacheHit = strings.EqualFold(cacheStr, "HIT")
+ }
+ if ageStr := header.Get("Age"); ageStr != "" {
+ age, err := strconv.ParseUint(ageStr, 10, 64)
+ if err != nil {
+ return fmt.Errorf("Failed to parse Age Header: %v", err)
+ }
+ q.CacheAge = time.Duration(age) * time.Second
+ }
+
+ return nil
+}
+
+// decodeBody is used to JSON decode a body
+func decodeBody(resp *http.Response, out interface{}) error {
+ dec := json.NewDecoder(resp.Body)
+ return dec.Decode(out)
+}
+
+// encodeBody is used to encode a request body
+func encodeBody(obj interface{}) (io.Reader, error) {
+ buf := bytes.NewBuffer(nil)
+ enc := json.NewEncoder(buf)
+ if err := enc.Encode(obj); err != nil {
+ return nil, err
+ }
+ return buf, nil
+}
+
+// requireOK is used to wrap doRequest and check for a 200
+func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) {
+ if e != nil {
+ if resp != nil {
+ resp.Body.Close()
+ }
+ return d, nil, e
+ }
+ if resp.StatusCode != 200 {
+ return d, nil, generateUnexpectedResponseCodeError(resp)
+ }
+ return d, resp, nil
+}
+
+func (req *request) filterQuery(filter string) {
+ if filter == "" {
+ return
+ }
+
+ req.params.Set("filter", filter)
+}
+
+// generateUnexpectedResponseCodeError consumes the rest of the body, closes
+// the body stream and generates an error indicating the status code was
+// unexpected.
+func generateUnexpectedResponseCodeError(resp *http.Response) error {
+ var buf bytes.Buffer
+ io.Copy(&buf, resp.Body)
+ resp.Body.Close()
+ return fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
+}
+
+func requireNotFoundOrOK(d time.Duration, resp *http.Response, e error) (bool, time.Duration, *http.Response, error) {
+ if e != nil {
+ if resp != nil {
+ resp.Body.Close()
+ }
+ return false, d, nil, e
+ }
+ switch resp.StatusCode {
+ case 200:
+ return true, d, resp, nil
+ case 404:
+ return false, d, resp, nil
+ default:
+ return false, d, nil, generateUnexpectedResponseCodeError(resp)
+ }
+}
diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go
new file mode 100644
index 0000000..607d5d0
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/catalog.go
@@ -0,0 +1,337 @@
+package api
+
+import (
+ "net"
+ "strconv"
+)
+
+type Weights struct {
+ Passing int
+ Warning int
+}
+
+type Node struct {
+ ID string
+ Node string
+ Address string
+ Datacenter string
+ TaggedAddresses map[string]string
+ Meta map[string]string
+ CreateIndex uint64
+ ModifyIndex uint64
+}
+
+type ServiceAddress struct {
+ Address string
+ Port int
+}
+
+type CatalogService struct {
+ ID string
+ Node string
+ Address string
+ Datacenter string
+ TaggedAddresses map[string]string
+ NodeMeta map[string]string
+ ServiceID string
+ ServiceName string
+ ServiceAddress string
+ ServiceTaggedAddresses map[string]ServiceAddress
+ ServiceTags []string
+ ServiceMeta map[string]string
+ ServicePort int
+ ServiceWeights Weights
+ ServiceEnableTagOverride bool
+ ServiceProxy *AgentServiceConnectProxyConfig
+ CreateIndex uint64
+ Checks HealthChecks
+ ModifyIndex uint64
+ Namespace string `json:",omitempty"`
+}
+
+type CatalogNode struct {
+ Node *Node
+ Services map[string]*AgentService
+}
+
+type CatalogNodeServiceList struct {
+ Node *Node
+ Services []*AgentService
+}
+
+type CatalogRegistration struct {
+ ID string
+ Node string
+ Address string
+ TaggedAddresses map[string]string
+ NodeMeta map[string]string
+ Datacenter string
+ Service *AgentService
+ Check *AgentCheck
+ Checks HealthChecks
+ SkipNodeUpdate bool
+}
+
+type CatalogDeregistration struct {
+ Node string
+ Address string `json:",omitempty"` // Obsolete.
+ Datacenter string
+ ServiceID string
+ CheckID string
+ Namespace string `json:",omitempty"`
+}
+
+type CompoundServiceName struct {
+ Name string
+
+ // Namespacing is a Consul Enterprise feature.
+ Namespace string `json:",omitempty"`
+}
+
+// GatewayService associates a gateway with a linked service.
+// It also contains service-specific gateway configuration like ingress listener port and protocol.
+type GatewayService struct {
+ Gateway CompoundServiceName
+ Service CompoundServiceName
+ GatewayKind ServiceKind
+ Port int `json:",omitempty"`
+ Protocol string `json:",omitempty"`
+ Hosts []string `json:",omitempty"`
+ CAFile string `json:",omitempty"`
+ CertFile string `json:",omitempty"`
+ KeyFile string `json:",omitempty"`
+ SNI string `json:",omitempty"`
+ FromWildcard bool `json:",omitempty"`
+}
+
+// Catalog can be used to query the Catalog endpoints
+type Catalog struct {
+ c *Client
+}
+
+// Catalog returns a handle to the catalog endpoints
+func (c *Client) Catalog() *Catalog {
+ return &Catalog{c}
+}
+
+func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) {
+ r := c.c.newRequest("PUT", "/v1/catalog/register")
+ r.setWriteOptions(q)
+ r.obj = reg
+ rtt, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ resp.Body.Close()
+
+ wm := &WriteMeta{}
+ wm.RequestTime = rtt
+
+ return wm, nil
+}
+
+func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) {
+ r := c.c.newRequest("PUT", "/v1/catalog/deregister")
+ r.setWriteOptions(q)
+ r.obj = dereg
+ rtt, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ resp.Body.Close()
+
+ wm := &WriteMeta{}
+ wm.RequestTime = rtt
+
+ return wm, nil
+}
+
+// Datacenters is used to query for all the known datacenters
+func (c *Catalog) Datacenters() ([]string, error) {
+ r := c.c.newRequest("GET", "/v1/catalog/datacenters")
+ _, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var out []string
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Nodes is used to query all the known nodes
+func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) {
+ r := c.c.newRequest("GET", "/v1/catalog/nodes")
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out []*Node
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
+
+// Services is used to query for all known services
+func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) {
+ r := c.c.newRequest("GET", "/v1/catalog/services")
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out map[string][]string
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
+
+// Service is used to query catalog entries for a given service
+func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
+ var tags []string
+ if tag != "" {
+ tags = []string{tag}
+ }
+ return c.service(service, tags, q, false)
+}
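+
+// Query sketch (the service name and tag are assumptions):
+//
+//	entries, meta, err := client.Catalog().Service("web", "primary", nil)
+//	// meta.LastIndex can seed QueryOptions.WaitIndex for a blocking re-query.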
+
+// ServiceMultipleTags is like Service but supports multiple tags for filtering
+func (c *Catalog) ServiceMultipleTags(service string, tags []string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
+ return c.service(service, tags, q, false)
+}
+
+// Connect is used to query catalog entries for a given Connect-enabled service
+func (c *Catalog) Connect(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
+ var tags []string
+ if tag != "" {
+ tags = []string{tag}
+ }
+ return c.service(service, tags, q, true)
+}
+
+// ConnectMultipleTags is like Connect but supports multiple tags for filtering
+func (c *Catalog) ConnectMultipleTags(service string, tags []string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
+ return c.service(service, tags, q, true)
+}
+
+func (c *Catalog) service(service string, tags []string, q *QueryOptions, connect bool) ([]*CatalogService, *QueryMeta, error) {
+ path := "/v1/catalog/service/" + service
+ if connect {
+ path = "/v1/catalog/connect/" + service
+ }
+ r := c.c.newRequest("GET", path)
+ r.setQueryOptions(q)
+ if len(tags) > 0 {
+ for _, tag := range tags {
+ r.params.Add("tag", tag)
+ }
+ }
+ rtt, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out []*CatalogService
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
+
+// Node is used to query for service information about a single node
+func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) {
+ r := c.c.newRequest("GET", "/v1/catalog/node/"+node)
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out *CatalogNode
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
+
+// NodeServiceList is used to query for service information about a single node. It differs from
+// the Node function only in its return type, which contains a list of services as opposed to
+// a map of service IDs to services. This structure allows for using the wildcard specifier
+// '*' for the Namespace in the QueryOptions.
+func (c *Catalog) NodeServiceList(node string, q *QueryOptions) (*CatalogNodeServiceList, *QueryMeta, error) {
+ r := c.c.newRequest("GET", "/v1/catalog/node-services/"+node)
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out *CatalogNodeServiceList
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
+
+// GatewayServices is used to query the services associated with an ingress gateway or terminating gateway.
+func (c *Catalog) GatewayServices(gateway string, q *QueryOptions) ([]*GatewayService, *QueryMeta, error) {
+ r := c.c.newRequest("GET", "/v1/catalog/gateway-services/"+gateway)
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out []*GatewayService
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
+
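+// ParseServiceAddr splits an "address:port" string into a ServiceAddress.
+// For example, "10.0.0.1:8080" yields {Address: "10.0.0.1", Port: 8080}; any
+// error from splitting or parsing the port is returned alongside the partial
+// result.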
+func ParseServiceAddr(addrPort string) (ServiceAddress, error) {
+ port := 0
+ host, portStr, err := net.SplitHostPort(addrPort)
+ if err == nil {
+ port, err = strconv.Atoi(portStr)
+ }
+ return ServiceAddress{Address: host, Port: port}, err
+}
diff --git a/vendor/github.com/hashicorp/consul/api/config_entry.go b/vendor/github.com/hashicorp/consul/api/config_entry.go
new file mode 100644
index 0000000..a234f6e
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/config_entry.go
@@ -0,0 +1,355 @@
+package api
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/mitchellh/mapstructure"
+)
+
+const (
+ ServiceDefaults string = "service-defaults"
+ ProxyDefaults string = "proxy-defaults"
+ ServiceRouter string = "service-router"
+ ServiceSplitter string = "service-splitter"
+ ServiceResolver string = "service-resolver"
+ IngressGateway string = "ingress-gateway"
+ TerminatingGateway string = "terminating-gateway"
+
+ ProxyConfigGlobal string = "global"
+)
+
+type ConfigEntry interface {
+ GetKind() string
+ GetName() string
+ GetCreateIndex() uint64
+ GetModifyIndex() uint64
+}
+
+type MeshGatewayMode string
+
+const (
+ // MeshGatewayModeDefault represents no specific mode and should
+ // be used to indicate that a different layer of the configuration
+ // chain should take precedence
+ MeshGatewayModeDefault MeshGatewayMode = ""
+
+ // MeshGatewayModeNone represents that the Upstream Connect connections
+ // should be direct and not flow through a mesh gateway.
+ MeshGatewayModeNone MeshGatewayMode = "none"
+
+	// MeshGatewayModeLocal represents that the Upstream Connect connections
+	// should be made to a mesh gateway in the local datacenter.
+ MeshGatewayModeLocal MeshGatewayMode = "local"
+
+ // MeshGatewayModeRemote represents that the Upstream Connect connections
+ // should be made to a mesh gateway in a remote datacenter.
+ MeshGatewayModeRemote MeshGatewayMode = "remote"
+)
+
+// MeshGatewayConfig controls how Mesh Gateways are used for upstream Connect
+// services
+type MeshGatewayConfig struct {
+ // Mode is the mode that should be used for the upstream connection.
+ Mode MeshGatewayMode `json:",omitempty"`
+}
+
+// ExposeConfig describes HTTP paths to expose through Envoy outside of Connect.
+// Users can expose individual paths and/or all HTTP/GRPC paths for checks.
+type ExposeConfig struct {
+ // Checks defines whether paths associated with Consul checks will be exposed.
+ // This flag triggers exposing all HTTP and GRPC check paths registered for the service.
+ Checks bool `json:",omitempty"`
+
+ // Paths is the list of paths exposed through the proxy.
+ Paths []ExposePath `json:",omitempty"`
+}
+
+type ExposePath struct {
+ // ListenerPort defines the port of the proxy's listener for exposed paths.
+ ListenerPort int `json:",omitempty" alias:"listener_port"`
+
+	// Path is the path to expose through the proxy, e.g. "/metrics".
+ Path string `json:",omitempty"`
+
+ // LocalPathPort is the port that the service is listening on for the given path.
+ LocalPathPort int `json:",omitempty" alias:"local_path_port"`
+
+ // Protocol describes the upstream's service protocol.
+ // Valid values are "http" and "http2", defaults to "http"
+ Protocol string `json:",omitempty"`
+
+ // ParsedFromCheck is set if this path was parsed from a registered check
+ ParsedFromCheck bool
+}
+
+type ServiceConfigEntry struct {
+ Kind string
+ Name string
+ Namespace string `json:",omitempty"`
+ Protocol string `json:",omitempty"`
+ MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"`
+ Expose ExposeConfig `json:",omitempty"`
+ ExternalSNI string `json:",omitempty" alias:"external_sni"`
+ Meta map[string]string `json:",omitempty"`
+ CreateIndex uint64
+ ModifyIndex uint64
+}
+
+func (s *ServiceConfigEntry) GetKind() string {
+ return s.Kind
+}
+
+func (s *ServiceConfigEntry) GetName() string {
+ return s.Name
+}
+
+func (s *ServiceConfigEntry) GetCreateIndex() uint64 {
+ return s.CreateIndex
+}
+
+func (s *ServiceConfigEntry) GetModifyIndex() uint64 {
+ return s.ModifyIndex
+}
+
+type ProxyConfigEntry struct {
+ Kind string
+ Name string
+ Namespace string `json:",omitempty"`
+ Config map[string]interface{} `json:",omitempty"`
+ MeshGateway MeshGatewayConfig `json:",omitempty" alias:"mesh_gateway"`
+ Expose ExposeConfig `json:",omitempty"`
+ Meta map[string]string `json:",omitempty"`
+ CreateIndex uint64
+ ModifyIndex uint64
+}
+
+func (p *ProxyConfigEntry) GetKind() string {
+ return p.Kind
+}
+
+func (p *ProxyConfigEntry) GetName() string {
+ return p.Name
+}
+
+func (p *ProxyConfigEntry) GetCreateIndex() uint64 {
+ return p.CreateIndex
+}
+
+func (p *ProxyConfigEntry) GetModifyIndex() uint64 {
+ return p.ModifyIndex
+}
+
+func makeConfigEntry(kind, name string) (ConfigEntry, error) {
+ switch kind {
+ case ServiceDefaults:
+ return &ServiceConfigEntry{Kind: kind, Name: name}, nil
+ case ProxyDefaults:
+ return &ProxyConfigEntry{Kind: kind, Name: name}, nil
+ case ServiceRouter:
+ return &ServiceRouterConfigEntry{Kind: kind, Name: name}, nil
+ case ServiceSplitter:
+ return &ServiceSplitterConfigEntry{Kind: kind, Name: name}, nil
+ case ServiceResolver:
+ return &ServiceResolverConfigEntry{Kind: kind, Name: name}, nil
+ case IngressGateway:
+ return &IngressGatewayConfigEntry{Kind: kind, Name: name}, nil
+ case TerminatingGateway:
+ return &TerminatingGatewayConfigEntry{Kind: kind, Name: name}, nil
+ default:
+ return nil, fmt.Errorf("invalid config entry kind: %s", kind)
+ }
+}
+
+func MakeConfigEntry(kind, name string) (ConfigEntry, error) {
+ return makeConfigEntry(kind, name)
+}
+
+// DecodeConfigEntry will decode the result of using json.Unmarshal of a config
+// entry into a map[string]interface{}.
+//
+// Important caveats:
+//
+// - This will NOT work if the map[string]interface{} was produced using HCL
+// decoding as that requires more extensive parsing to work around the issues
+// with map[string][]interface{} that arise.
+//
+// - This will only decode fields using their camel case json field
+// representations.
+func DecodeConfigEntry(raw map[string]interface{}) (ConfigEntry, error) {
+ var entry ConfigEntry
+
+ kindVal, ok := raw["Kind"]
+ if !ok {
+ kindVal, ok = raw["kind"]
+ }
+ if !ok {
+ return nil, fmt.Errorf("Payload does not contain a kind/Kind key at the top level")
+ }
+
+ if kindStr, ok := kindVal.(string); ok {
+ newEntry, err := makeConfigEntry(kindStr, "")
+ if err != nil {
+ return nil, err
+ }
+ entry = newEntry
+ } else {
+ return nil, fmt.Errorf("Kind value in payload is not a string")
+ }
+
+ decodeConf := &mapstructure.DecoderConfig{
+ DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
+ Result: &entry,
+ WeaklyTypedInput: true,
+ }
+
+ decoder, err := mapstructure.NewDecoder(decodeConf)
+ if err != nil {
+ return nil, err
+ }
+
+ return entry, decoder.Decode(raw)
+}
+
+func DecodeConfigEntryFromJSON(data []byte) (ConfigEntry, error) {
+ var raw map[string]interface{}
+ if err := json.Unmarshal(data, &raw); err != nil {
+ return nil, err
+ }
+
+ return DecodeConfigEntry(raw)
+}
+
+func decodeConfigEntrySlice(raw []map[string]interface{}) ([]ConfigEntry, error) {
+ var entries []ConfigEntry
+ for _, rawEntry := range raw {
+ entry, err := DecodeConfigEntry(rawEntry)
+ if err != nil {
+ return nil, err
+ }
+ entries = append(entries, entry)
+ }
+ return entries, nil
+}
+
+// ConfigEntries can be used to query the Config endpoints
+type ConfigEntries struct {
+ c *Client
+}
+
+// ConfigEntries returns a handle to the Config endpoints
+func (c *Client) ConfigEntries() *ConfigEntries {
+ return &ConfigEntries{c}
+}
+
+func (conf *ConfigEntries) Get(kind string, name string, q *QueryOptions) (ConfigEntry, *QueryMeta, error) {
+ if kind == "" || name == "" {
+ return nil, nil, fmt.Errorf("Both kind and name parameters must not be empty")
+ }
+
+ entry, err := makeConfigEntry(kind, name)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ r := conf.c.newRequest("GET", fmt.Sprintf("/v1/config/%s/%s", kind, name))
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(conf.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ if err := decodeBody(resp, entry); err != nil {
+ return nil, nil, err
+ }
+
+ return entry, qm, nil
+}
+
+func (conf *ConfigEntries) List(kind string, q *QueryOptions) ([]ConfigEntry, *QueryMeta, error) {
+ if kind == "" {
+ return nil, nil, fmt.Errorf("The kind parameter must not be empty")
+ }
+
+ r := conf.c.newRequest("GET", fmt.Sprintf("/v1/config/%s", kind))
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(conf.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var raw []map[string]interface{}
+ if err := decodeBody(resp, &raw); err != nil {
+ return nil, nil, err
+ }
+
+ entries, err := decodeConfigEntrySlice(raw)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return entries, qm, nil
+}
+
+func (conf *ConfigEntries) Set(entry ConfigEntry, w *WriteOptions) (bool, *WriteMeta, error) {
+ return conf.set(entry, nil, w)
+}
+
+func (conf *ConfigEntries) CAS(entry ConfigEntry, index uint64, w *WriteOptions) (bool, *WriteMeta, error) {
+ return conf.set(entry, map[string]string{"cas": strconv.FormatUint(index, 10)}, w)
+}
+
+func (conf *ConfigEntries) set(entry ConfigEntry, params map[string]string, w *WriteOptions) (bool, *WriteMeta, error) {
+ r := conf.c.newRequest("PUT", "/v1/config")
+ r.setWriteOptions(w)
+ for param, value := range params {
+ r.params.Set(param, value)
+ }
+ r.obj = entry
+ rtt, resp, err := requireOK(conf.c.doRequest(r))
+ if err != nil {
+ return false, nil, err
+ }
+ defer resp.Body.Close()
+
+ var buf bytes.Buffer
+ if _, err := io.Copy(&buf, resp.Body); err != nil {
+ return false, nil, fmt.Errorf("Failed to read response: %v", err)
+ }
+ res := strings.Contains(buf.String(), "true")
+
+ wm := &WriteMeta{RequestTime: rtt}
+ return res, wm, nil
+}
+
+func (conf *ConfigEntries) Delete(kind string, name string, w *WriteOptions) (*WriteMeta, error) {
+ if kind == "" || name == "" {
+ return nil, fmt.Errorf("Both kind and name parameters must not be empty")
+ }
+
+ r := conf.c.newRequest("DELETE", fmt.Sprintf("/v1/config/%s/%s", kind, name))
+ r.setWriteOptions(w)
+ rtt, resp, err := requireOK(conf.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ resp.Body.Close()
+ wm := &WriteMeta{RequestTime: rtt}
+ return wm, nil
+}
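Taken together, the client above supports a simple write-then-read round trip plus JSON decoding. Below is a minimal usage sketch (not part of the vendored file), assuming a Consul agent reachable at the default local address; `NewClient` and `DefaultConfig` live elsewhere in this package, and `ServiceDefaults` is the kind constant dispatched on by `makeConfigEntry` above.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	entries := client.ConfigEntries()

	// Write a service-defaults entry for "web".
	ok, _, err := entries.Set(&api.ServiceConfigEntry{
		Kind:     api.ServiceDefaults,
		Name:     "web",
		Protocol: "http",
	}, nil)
	if err != nil || !ok {
		log.Fatalf("set failed: ok=%v err=%v", ok, err)
	}

	// Read it back; Get returns the ConfigEntry interface, so assert the
	// concrete type to reach kind-specific fields.
	entry, _, err := entries.Get(api.ServiceDefaults, "web", nil)
	if err != nil {
		log.Fatal(err)
	}
	svc := entry.(*api.ServiceConfigEntry)
	fmt.Println(svc.Name, svc.Protocol)

	// Raw JSON can also be decoded into a typed entry via the kind dispatch.
	decoded, err := api.DecodeConfigEntryFromJSON(
		[]byte(`{"Kind": "service-defaults", "Name": "api", "Protocol": "grpc"}`))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.GetKind(), decoded.GetName())
}
```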
diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go b/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go
new file mode 100644
index 0000000..2091063
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go
@@ -0,0 +1,206 @@
+package api
+
+import (
+ "encoding/json"
+ "time"
+)
+
+type ServiceRouterConfigEntry struct {
+ Kind string
+ Name string
+ Namespace string `json:",omitempty"`
+
+ Routes []ServiceRoute `json:",omitempty"`
+
+ Meta map[string]string `json:",omitempty"`
+ CreateIndex uint64
+ ModifyIndex uint64
+}
+
+func (e *ServiceRouterConfigEntry) GetKind() string { return e.Kind }
+func (e *ServiceRouterConfigEntry) GetName() string { return e.Name }
+func (e *ServiceRouterConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex }
+func (e *ServiceRouterConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex }
+
+type ServiceRoute struct {
+ Match *ServiceRouteMatch `json:",omitempty"`
+ Destination *ServiceRouteDestination `json:",omitempty"`
+}
+
+type ServiceRouteMatch struct {
+ HTTP *ServiceRouteHTTPMatch `json:",omitempty"`
+}
+
+type ServiceRouteHTTPMatch struct {
+ PathExact string `json:",omitempty" alias:"path_exact"`
+ PathPrefix string `json:",omitempty" alias:"path_prefix"`
+ PathRegex string `json:",omitempty" alias:"path_regex"`
+
+ Header []ServiceRouteHTTPMatchHeader `json:",omitempty"`
+ QueryParam []ServiceRouteHTTPMatchQueryParam `json:",omitempty" alias:"query_param"`
+ Methods []string `json:",omitempty"`
+}
+
+type ServiceRouteHTTPMatchHeader struct {
+ Name string
+ Present bool `json:",omitempty"`
+ Exact string `json:",omitempty"`
+ Prefix string `json:",omitempty"`
+ Suffix string `json:",omitempty"`
+ Regex string `json:",omitempty"`
+ Invert bool `json:",omitempty"`
+}
+
+type ServiceRouteHTTPMatchQueryParam struct {
+ Name string
+ Present bool `json:",omitempty"`
+ Exact string `json:",omitempty"`
+ Regex string `json:",omitempty"`
+}
+
+type ServiceRouteDestination struct {
+ Service string `json:",omitempty"`
+ ServiceSubset string `json:",omitempty" alias:"service_subset"`
+ Namespace string `json:",omitempty"`
+ PrefixRewrite string `json:",omitempty" alias:"prefix_rewrite"`
+ RequestTimeout time.Duration `json:",omitempty" alias:"request_timeout"`
+ NumRetries uint32 `json:",omitempty" alias:"num_retries"`
+ RetryOnConnectFailure bool `json:",omitempty" alias:"retry_on_connect_failure"`
+ RetryOnStatusCodes []uint32 `json:",omitempty" alias:"retry_on_status_codes"`
+}
+
+func (e *ServiceRouteDestination) MarshalJSON() ([]byte, error) {
+ type Alias ServiceRouteDestination
+ exported := &struct {
+ RequestTimeout string `json:",omitempty"`
+ *Alias
+ }{
+ RequestTimeout: e.RequestTimeout.String(),
+ Alias: (*Alias)(e),
+ }
+ if e.RequestTimeout == 0 {
+ exported.RequestTimeout = ""
+ }
+
+ return json.Marshal(exported)
+}
+
+func (e *ServiceRouteDestination) UnmarshalJSON(data []byte) error {
+ type Alias ServiceRouteDestination
+ aux := &struct {
+ RequestTimeout string
+ *Alias
+ }{
+ Alias: (*Alias)(e),
+ }
+ if err := json.Unmarshal(data, &aux); err != nil {
+ return err
+ }
+ var err error
+ if aux.RequestTimeout != "" {
+ if e.RequestTimeout, err = time.ParseDuration(aux.RequestTimeout); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type ServiceSplitterConfigEntry struct {
+ Kind string
+ Name string
+ Namespace string `json:",omitempty"`
+
+ Splits []ServiceSplit `json:",omitempty"`
+
+ Meta map[string]string `json:",omitempty"`
+ CreateIndex uint64
+ ModifyIndex uint64
+}
+
+func (e *ServiceSplitterConfigEntry) GetKind() string { return e.Kind }
+func (e *ServiceSplitterConfigEntry) GetName() string { return e.Name }
+func (e *ServiceSplitterConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex }
+func (e *ServiceSplitterConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex }
+
+type ServiceSplit struct {
+ Weight float32
+ Service string `json:",omitempty"`
+ ServiceSubset string `json:",omitempty" alias:"service_subset"`
+ Namespace string `json:",omitempty"`
+}
+
+type ServiceResolverConfigEntry struct {
+ Kind string
+ Name string
+ Namespace string `json:",omitempty"`
+
+ DefaultSubset string `json:",omitempty" alias:"default_subset"`
+ Subsets map[string]ServiceResolverSubset `json:",omitempty"`
+ Redirect *ServiceResolverRedirect `json:",omitempty"`
+ Failover map[string]ServiceResolverFailover `json:",omitempty"`
+ ConnectTimeout time.Duration `json:",omitempty" alias:"connect_timeout"`
+
+ Meta map[string]string `json:",omitempty"`
+ CreateIndex uint64
+ ModifyIndex uint64
+}
+
+func (e *ServiceResolverConfigEntry) MarshalJSON() ([]byte, error) {
+ type Alias ServiceResolverConfigEntry
+ exported := &struct {
+ ConnectTimeout string `json:",omitempty"`
+ *Alias
+ }{
+ ConnectTimeout: e.ConnectTimeout.String(),
+ Alias: (*Alias)(e),
+ }
+ if e.ConnectTimeout == 0 {
+ exported.ConnectTimeout = ""
+ }
+
+ return json.Marshal(exported)
+}
+
+func (e *ServiceResolverConfigEntry) UnmarshalJSON(data []byte) error {
+ type Alias ServiceResolverConfigEntry
+ aux := &struct {
+ ConnectTimeout string
+ *Alias
+ }{
+ Alias: (*Alias)(e),
+ }
+ if err := json.Unmarshal(data, &aux); err != nil {
+ return err
+ }
+ var err error
+ if aux.ConnectTimeout != "" {
+ if e.ConnectTimeout, err = time.ParseDuration(aux.ConnectTimeout); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (e *ServiceResolverConfigEntry) GetKind() string { return e.Kind }
+func (e *ServiceResolverConfigEntry) GetName() string { return e.Name }
+func (e *ServiceResolverConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex }
+func (e *ServiceResolverConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex }
+
+type ServiceResolverSubset struct {
+ Filter string `json:",omitempty"`
+ OnlyPassing bool `json:",omitempty" alias:"only_passing"`
+}
+
+type ServiceResolverRedirect struct {
+ Service string `json:",omitempty"`
+ ServiceSubset string `json:",omitempty" alias:"service_subset"`
+ Namespace string `json:",omitempty"`
+ Datacenter string `json:",omitempty"`
+}
+
+type ServiceResolverFailover struct {
+ Service string `json:",omitempty"`
+ ServiceSubset string `json:",omitempty" alias:"service_subset"`
+ Namespace string `json:",omitempty"`
+ Datacenters []string `json:",omitempty"`
+}
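These discovery-chain entries are written through the same `ConfigEntries` client shown earlier. The following is a minimal sketch (not part of the vendored file) of a resolver with named subsets and cross-datacenter failover; the `ServiceResolver` kind constant is the one referenced by `makeConfigEntry`, the subset filter expressions are illustrative, and `ConnectTimeout` round-trips as a duration string such as `"15s"` via the custom marshaling above.

```go
package main

import (
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	resolver := &api.ServiceResolverConfigEntry{
		Kind:           api.ServiceResolver,
		Name:           "web",
		DefaultSubset:  "v1",
		ConnectTimeout: 15 * time.Second, // serialized as "15s" by MarshalJSON
		Subsets: map[string]api.ServiceResolverSubset{
			"v1": {Filter: "Service.Meta.version == v1"},
			"v2": {Filter: "Service.Meta.version == v2", OnlyPassing: true},
		},
		// If a subset has no healthy instances, fail over to dc2.
		Failover: map[string]api.ServiceResolverFailover{
			"*": {Datacenters: []string{"dc2"}},
		},
	}

	if ok, _, err := client.ConfigEntries().Set(resolver, nil); err != nil || !ok {
		log.Fatalf("set failed: ok=%v err=%v", ok, err)
	}
}
```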
diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go b/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go
new file mode 100644
index 0000000..e259427
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go
@@ -0,0 +1,174 @@
+package api
+
+// IngressGatewayConfigEntry manages the configuration for an ingress service
+// with the given name.
+type IngressGatewayConfigEntry struct {
+ // Kind of the config entry. This should be set to api.IngressGateway.
+ Kind string
+
+ // Name is used to match the config entry with its associated ingress gateway
+ // service. This should match the name provided in the service definition.
+ Name string
+
+ // Namespace is the namespace the IngressGateway is associated with
+ // Namespacing is a Consul Enterprise feature.
+ Namespace string `json:",omitempty"`
+
+ // TLS holds the TLS configuration for this gateway.
+ TLS GatewayTLSConfig
+
+ // Listeners declares what ports the ingress gateway should listen on, and
+ // what services to associate with those ports.
+ Listeners []IngressListener
+
+ Meta map[string]string `json:",omitempty"`
+
+ // CreateIndex is the Raft index this entry was created at. This is a
+ // read-only field.
+ CreateIndex uint64
+
+ // ModifyIndex is used for the Check-And-Set operations and can also be fed
+ // back into the WaitIndex of the QueryOptions in order to perform blocking
+ // queries.
+ ModifyIndex uint64
+}
+
+type GatewayTLSConfig struct {
+ // Indicates that TLS should be enabled for this gateway service
+ Enabled bool
+}
+
+// IngressListener manages the configuration for a listener on a specific port.
+type IngressListener struct {
+ // Port declares the port on which the ingress gateway should listen for traffic.
+ Port int
+
+ // Protocol declares what type of traffic this listener is expected to
+ // receive. Depending on the protocol, a listener might support multiplexing
+ // services over a single port, or additional discovery chain features. The
+ // currently supported values are: (tcp | http | http2 | grpc).
+ Protocol string
+
+ // Services declares the set of services to which the listener forwards
+ // traffic.
+ //
+ // For "tcp" protocol listeners, only a single service is allowed.
+ // For "http" listeners, multiple services can be declared.
+ Services []IngressService
+}
+
+// IngressService manages configuration for services that are exposed to
+// ingress traffic.
+type IngressService struct {
+ // Name declares the service to which traffic should be forwarded.
+ //
+ // This can either be a specific service, or the wildcard specifier,
+ // "*". If the wildcard specifier is provided, the listener must be of "http"
+ // protocol and means that the listener will forward traffic to all services.
+ //
+ // A name can be specified on multiple listeners, and will be exposed on
+ // all of them.
+ Name string
+
+ // Hosts is a list of hostnames which should be associated to this service on
+ // the defined listener. Only allowed on layer 7 protocols, this will be used
+ // to route traffic to the service by matching the Host header of the HTTP
+ // request.
+ //
+ // If a host is provided for a service that also has a wildcard specifier
+ // defined, the host will override the wildcard-specifier-provided
+ // ".*" domain for that listener.
+ //
+ // This cannot be specified when using the wildcard specifier, "*", or when
+ // using a "tcp" listener.
+ Hosts []string
+
+ // Namespace is the namespace where the service is located.
+ // Namespacing is a Consul Enterprise feature.
+ Namespace string `json:",omitempty"`
+}
+
+func (i *IngressGatewayConfigEntry) GetKind() string {
+ return i.Kind
+}
+
+func (i *IngressGatewayConfigEntry) GetName() string {
+ return i.Name
+}
+
+func (i *IngressGatewayConfigEntry) GetCreateIndex() uint64 {
+ return i.CreateIndex
+}
+
+func (i *IngressGatewayConfigEntry) GetModifyIndex() uint64 {
+ return i.ModifyIndex
+}
+
+// TerminatingGatewayConfigEntry manages the configuration for a terminating gateway
+// with the given name.
+type TerminatingGatewayConfigEntry struct {
+ // Kind of the config entry. This should be set to api.TerminatingGateway.
+ Kind string
+
+ // Name is used to match the config entry with its associated terminating gateway
+ // service. This should match the name provided in the service definition.
+ Name string
+
+ // Services is a list of service names represented by the terminating gateway.
+ Services []LinkedService `json:",omitempty"`
+
+ Meta map[string]string `json:",omitempty"`
+
+ // CreateIndex is the Raft index this entry was created at. This is a
+ // read-only field.
+ CreateIndex uint64
+
+ // ModifyIndex is used for the Check-And-Set operations and can also be fed
+ // back into the WaitIndex of the QueryOptions in order to perform blocking
+ // queries.
+ ModifyIndex uint64
+
+ // Namespace is the namespace the config entry is associated with
+ // Namespacing is a Consul Enterprise feature.
+ Namespace string `json:",omitempty"`
+}
+
+// A LinkedService is a service represented by a terminating gateway
+type LinkedService struct {
+ // The namespace the service is registered in
+ Namespace string `json:",omitempty"`
+
+ // Name is the name of the service, as defined in Consul's catalog
+ Name string `json:",omitempty"`
+
+ // CAFile is the optional path to a CA certificate to use for TLS connections
+ // from the gateway to the linked service
+ CAFile string `json:",omitempty" alias:"ca_file"`
+
+ // CertFile is the optional path to a client certificate to use for TLS connections
+ // from the gateway to the linked service
+ CertFile string `json:",omitempty" alias:"cert_file"`
+
+ // KeyFile is the optional path to a private key to use for TLS connections
+ // from the gateway to the linked service
+ KeyFile string `json:",omitempty" alias:"key_file"`
+
+ // SNI is the optional name to specify during the TLS handshake with a linked service
+ SNI string `json:",omitempty"`
+}
+
+func (g *TerminatingGatewayConfigEntry) GetKind() string {
+ return g.Kind
+}
+
+func (g *TerminatingGatewayConfigEntry) GetName() string {
+ return g.Name
+}
+
+func (g *TerminatingGatewayConfigEntry) GetCreateIndex() uint64 {
+ return g.CreateIndex
+}
+
+func (g *TerminatingGatewayConfigEntry) GetModifyIndex() uint64 {
+ return g.ModifyIndex
+}
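As a sketch of how the listener model above composes (not part of the vendored file), the entry below pairs an HTTP listener that multiplexes two services by `Host` header with a TCP listener bound to a single service; names and ports are illustrative, and `client` setup follows the earlier sketch.

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	igw := &api.IngressGatewayConfigEntry{
		Kind: api.IngressGateway,
		Name: "public-ingress",
		TLS:  api.GatewayTLSConfig{Enabled: true},
		Listeners: []api.IngressListener{
			{
				// An HTTP listener can multiplex several services,
				// routed by the Host header.
				Port:     8080,
				Protocol: "http",
				Services: []api.IngressService{
					{Name: "web", Hosts: []string{"web.example.com"}},
					{Name: "api", Hosts: []string{"api.example.com"}},
				},
			},
			{
				// A TCP listener forwards to exactly one service.
				Port:     9000,
				Protocol: "tcp",
				Services: []api.IngressService{{Name: "db"}},
			},
		},
	}

	if ok, _, err := client.ConfigEntries().Set(igw, nil); err != nil || !ok {
		log.Fatalf("set failed: ok=%v err=%v", ok, err)
	}
}
```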
diff --git a/vendor/github.com/hashicorp/consul/api/connect.go b/vendor/github.com/hashicorp/consul/api/connect.go
new file mode 100644
index 0000000..a40d1e2
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/connect.go
@@ -0,0 +1,12 @@
+package api
+
+// Connect can be used to work with endpoints related to Connect, the
+// feature for securely connecting services within Consul.
+type Connect struct {
+ c *Client
+}
+
+// Connect returns a handle to the connect-related endpoints
+func (c *Client) Connect() *Connect {
+ return &Connect{c}
+}
diff --git a/vendor/github.com/hashicorp/consul/api/connect_ca.go b/vendor/github.com/hashicorp/consul/api/connect_ca.go
new file mode 100644
index 0000000..26a7bfb
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/connect_ca.go
@@ -0,0 +1,181 @@
+package api
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/mitchellh/mapstructure"
+)
+
+// CAConfig is the structure for the Connect CA configuration.
+type CAConfig struct {
+ // Provider is the CA provider implementation to use.
+ Provider string
+
+ // Configuration is arbitrary configuration for the provider. This
+ // should only contain primitive values and containers (such as lists
+ // and maps).
+ Config map[string]interface{}
+
+ // State is read-only data that the provider might have persisted for use
+ // after restart or leadership transition. For example this might include
+ // UUIDs of resources it has created. Setting this when writing a
+ // configuration is an error.
+ State map[string]string
+
+ CreateIndex uint64
+ ModifyIndex uint64
+}
+
+// CommonCAProviderConfig is the common options available to all CA providers.
+type CommonCAProviderConfig struct {
+ LeafCertTTL time.Duration
+ SkipValidate bool
+ CSRMaxPerSecond float32
+ CSRMaxConcurrent int
+}
+
+// ConsulCAProviderConfig is the config for the built-in Consul CA provider.
+type ConsulCAProviderConfig struct {
+ CommonCAProviderConfig `mapstructure:",squash"`
+
+ PrivateKey string
+ RootCert string
+ RotationPeriod time.Duration
+ IntermediateCertTTL time.Duration
+}
+
+// ParseConsulCAConfig takes a raw config map and returns a parsed
+// ConsulCAProviderConfig.
+func ParseConsulCAConfig(raw map[string]interface{}) (*ConsulCAProviderConfig, error) {
+ var config ConsulCAProviderConfig
+ decodeConf := &mapstructure.DecoderConfig{
+ DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
+ Result: &config,
+ WeaklyTypedInput: true,
+ }
+
+ decoder, err := mapstructure.NewDecoder(decodeConf)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := decoder.Decode(raw); err != nil {
+ return nil, fmt.Errorf("error decoding config: %s", err)
+ }
+
+ return &config, nil
+}
+
+// CARootList is the structure for the results of listing roots.
+type CARootList struct {
+ ActiveRootID string
+ TrustDomain string
+ Roots []*CARoot
+}
+
+// CARoot represents a root CA certificate that is trusted.
+type CARoot struct {
+ // ID is a globally unique ID (UUID) representing this CA root.
+ ID string
+
+ // Name is a human-friendly name for this CA root. This value is
+ // opaque to Consul and is not used for anything internally.
+ Name string
+
+ // RootCertPEM is the PEM-encoded public certificate.
+ RootCertPEM string `json:"RootCert"`
+
+ // Active is true if this is the current active CA. This must only
+ // be true for exactly one CA. For any method that modifies roots in the
+ // state store, tests should be written to verify that multiple roots
+ // cannot be active.
+ Active bool
+
+ CreateIndex uint64
+ ModifyIndex uint64
+}
+
+// LeafCert is a certificate that has been issued by a Connect CA.
+type LeafCert struct {
+ // SerialNumber is the unique serial number for this certificate.
+ // This is encoded in standard hex separated by :.
+ SerialNumber string
+
+ // CertPEM and PrivateKeyPEM are the PEM-encoded certificate and private
+ // key for that cert, respectively. This should not be stored in the
+ // state store, but is present in the sign API response.
+ CertPEM string `json:",omitempty"`
+ PrivateKeyPEM string `json:",omitempty"`
+
+ // Service is the name of the service for which the cert was issued.
+ // ServiceURI is the cert URI value.
+ Service string
+ ServiceURI string
+
+ // ValidAfter and ValidBefore are the validity periods for the
+ // certificate.
+ ValidAfter time.Time
+ ValidBefore time.Time
+
+ CreateIndex uint64
+ ModifyIndex uint64
+}
+
+// CARoots queries the list of available roots.
+func (h *Connect) CARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) {
+ r := h.c.newRequest("GET", "/v1/connect/ca/roots")
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(h.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out CARootList
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return &out, qm, nil
+}
+
+// CAGetConfig returns the current CA configuration.
+func (h *Connect) CAGetConfig(q *QueryOptions) (*CAConfig, *QueryMeta, error) {
+ r := h.c.newRequest("GET", "/v1/connect/ca/configuration")
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(h.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out CAConfig
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return &out, qm, nil
+}
+
+// CASetConfig sets the current CA configuration.
+func (h *Connect) CASetConfig(conf *CAConfig, q *WriteOptions) (*WriteMeta, error) {
+ r := h.c.newRequest("PUT", "/v1/connect/ca/configuration")
+ r.setWriteOptions(q)
+ r.obj = conf
+ rtt, resp, err := requireOK(h.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{}
+ wm.RequestTime = rtt
+ return wm, nil
+}
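A minimal sketch of reading and parsing the CA configuration (not part of the vendored file); it assumes the built-in provider is named "consul" and that `client` is constructed as in the earlier sketch.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	cfg, _, err := client.Connect().CAGetConfig(nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("provider:", cfg.Provider)

	// For the built-in provider, the raw Config map can be decoded into a
	// typed struct with the helper above.
	if cfg.Provider == "consul" {
		parsed, err := api.ParseConsulCAConfig(cfg.Config)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("leaf cert TTL:", parsed.LeafCertTTL)
	}
}
```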
diff --git a/vendor/github.com/hashicorp/consul/api/connect_intention.go b/vendor/github.com/hashicorp/consul/api/connect_intention.go
new file mode 100644
index 0000000..3db177c
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/connect_intention.go
@@ -0,0 +1,309 @@
+package api
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "time"
+)
+
+// Intention defines an intention for the Connect Service Graph. This defines
+// the allowed or denied behavior of a connection between two services using
+// Connect.
+type Intention struct {
+ // ID is the UUID-based ID for the intention, always generated by Consul.
+ ID string
+
+ // Description is a human-friendly description of this intention.
+ // It is opaque to Consul and is only stored and transferred in API
+ // requests.
+ Description string
+
+ // SourceNS, SourceName are the namespace and name, respectively, of
+ // the source service. Either of these may be the wildcard "*", but only
+ // the full value can be a wildcard. Partial wildcards are not allowed.
+ // The source may also be a non-Consul service, as specified by SourceType.
+ //
+ // DestinationNS, DestinationName is the same, but for the destination
+ // service. The same rules apply. The destination is always a Consul
+ // service.
+ SourceNS, SourceName string
+ DestinationNS, DestinationName string
+
+ // SourceType is the type of the value for the source.
+ SourceType IntentionSourceType
+
+ // Action is whether this is an allowlist or denylist intention.
+ Action IntentionAction
+
+ // DefaultAddr, DefaultPort of the local listening proxy (if any) to
+ // make this connection.
+ DefaultAddr string
+ DefaultPort int
+
+ // Meta is arbitrary metadata associated with the intention. This is
+ // opaque to Consul but is served in API responses.
+ Meta map[string]string
+
+ // Precedence is the order in which the intention will be applied, with
+ // larger numbers being applied first. This is a read-only field; it is
+ // recomputed on any intention update.
+ Precedence int
+
+ // CreatedAt and UpdatedAt keep track of when this record was created
+ // or modified.
+ CreatedAt, UpdatedAt time.Time
+
+ // Hash of the contents of the intention
+ //
+ // This is needed mainly for replication purposes. When replicating from
+ // one DC to another, keeping the content Hash allows content changes to
+ // be detected more efficiently than checking every single field.
+ Hash []byte
+
+ CreateIndex uint64
+ ModifyIndex uint64
+}
+
+// String returns human-friendly output describing this intention.
+func (i *Intention) String() string {
+ return fmt.Sprintf("%s => %s (%s)",
+ i.SourceString(),
+ i.DestinationString(),
+ i.Action)
+}
+
+// SourceString returns the namespace/name format for the source, or
+// just "name" if the namespace is the default namespace.
+func (i *Intention) SourceString() string {
+ return i.partString(i.SourceNS, i.SourceName)
+}
+
+// DestinationString returns the namespace/name format for the destination, or
+// just "name" if the namespace is the default namespace.
+func (i *Intention) DestinationString() string {
+ return i.partString(i.DestinationNS, i.DestinationName)
+}
+
+func (i *Intention) partString(ns, n string) string {
+ // For now we omit the default namespace from the output. In the future
+ // we might want to look at this and show this in a multi-namespace world.
+ if ns != "" && ns != IntentionDefaultNamespace {
+ n = ns + "/" + n
+ }
+
+ return n
+}
+
+// IntentionDefaultNamespace is the default namespace value.
+const IntentionDefaultNamespace = "default"
+
+// IntentionAction is the action that the intention represents. This
+// can be "allow" or "deny" to allowlist or denylist intentions.
+type IntentionAction string
+
+const (
+ IntentionActionAllow IntentionAction = "allow"
+ IntentionActionDeny IntentionAction = "deny"
+)
+
+// IntentionSourceType is the type of the source within an intention.
+type IntentionSourceType string
+
+const (
+ // IntentionSourceConsul is a service within the Consul catalog.
+ IntentionSourceConsul IntentionSourceType = "consul"
+)
+
+// IntentionMatch describes the arguments for the intention match API.
+type IntentionMatch struct {
+ By IntentionMatchType
+ Names []string
+}
+
+// IntentionMatchType is the target for a match request. For example,
+// matching by source will look for all intentions that match the given
+// source value.
+type IntentionMatchType string
+
+const (
+ IntentionMatchSource IntentionMatchType = "source"
+ IntentionMatchDestination IntentionMatchType = "destination"
+)
+
+// IntentionCheck describes the arguments for the intention check API. For
+// more documentation see the IntentionCheck function.
+type IntentionCheck struct {
+ // Source and Destination are the source and destination values to
+ // check. The destination is always a Consul service, but the source
+ // may be other values as defined by the SourceType.
+ Source, Destination string
+
+ // SourceType is the type of the value for the source.
+ SourceType IntentionSourceType
+}
+
+// Intentions returns the list of intentions.
+func (h *Connect) Intentions(q *QueryOptions) ([]*Intention, *QueryMeta, error) {
+ r := h.c.newRequest("GET", "/v1/connect/intentions")
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(h.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out []*Intention
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
+
+// IntentionGet retrieves a single intention.
+func (h *Connect) IntentionGet(id string, q *QueryOptions) (*Intention, *QueryMeta, error) {
+ r := h.c.newRequest("GET", "/v1/connect/intentions/"+id)
+ r.setQueryOptions(q)
+ rtt, resp, err := h.c.doRequest(r)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ if resp.StatusCode == 404 {
+ return nil, qm, nil
+ } else if resp.StatusCode != 200 {
+ var buf bytes.Buffer
+ io.Copy(&buf, resp.Body)
+ return nil, nil, fmt.Errorf(
+ "Unexpected response %d: %s", resp.StatusCode, buf.String())
+ }
+
+ var out Intention
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return &out, qm, nil
+}
+
+// IntentionDelete deletes a single intention.
+func (h *Connect) IntentionDelete(id string, q *WriteOptions) (*WriteMeta, error) {
+ r := h.c.newRequest("DELETE", "/v1/connect/intentions/"+id)
+ r.setWriteOptions(q)
+ rtt, resp, err := requireOK(h.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &WriteMeta{}
+ qm.RequestTime = rtt
+
+ return qm, nil
+}
+
+// IntentionMatch returns the list of intentions that match a given source
+// or destination. The returned intentions are ordered by precedence where
+// result[0] is the highest precedence (if that matches, then that rule overrides
+// all other rules).
+//
+// Matching can be done for multiple names at the same time. The resulting
+// map is keyed by the given names. Casing is preserved.
+func (h *Connect) IntentionMatch(args *IntentionMatch, q *QueryOptions) (map[string][]*Intention, *QueryMeta, error) {
+ r := h.c.newRequest("GET", "/v1/connect/intentions/match")
+ r.setQueryOptions(q)
+ r.params.Set("by", string(args.By))
+ for _, name := range args.Names {
+ r.params.Add("name", name)
+ }
+ rtt, resp, err := requireOK(h.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out map[string][]*Intention
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
+
+// IntentionCheck returns whether a given source/destination would be allowed
+// or not given the current set of intentions and the configuration of Consul.
+func (h *Connect) IntentionCheck(args *IntentionCheck, q *QueryOptions) (bool, *QueryMeta, error) {
+ r := h.c.newRequest("GET", "/v1/connect/intentions/check")
+ r.setQueryOptions(q)
+ r.params.Set("source", args.Source)
+ r.params.Set("destination", args.Destination)
+ if args.SourceType != "" {
+ r.params.Set("source-type", string(args.SourceType))
+ }
+ rtt, resp, err := requireOK(h.c.doRequest(r))
+ if err != nil {
+ return false, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out struct{ Allowed bool }
+ if err := decodeBody(resp, &out); err != nil {
+ return false, nil, err
+ }
+ return out.Allowed, qm, nil
+}
+
+// IntentionCreate will create a new intention. The ID in the given
+// structure must be empty and a generated ID will be returned on
+// success.
+func (c *Connect) IntentionCreate(ixn *Intention, q *WriteOptions) (string, *WriteMeta, error) {
+ r := c.c.newRequest("POST", "/v1/connect/intentions")
+ r.setWriteOptions(q)
+ r.obj = ixn
+ rtt, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return "", nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{}
+ wm.RequestTime = rtt
+
+ var out struct{ ID string }
+ if err := decodeBody(resp, &out); err != nil {
+ return "", nil, err
+ }
+ return out.ID, wm, nil
+}
+
+// IntentionUpdate will update an existing intention. The ID in the given
+// structure must be non-empty.
+func (c *Connect) IntentionUpdate(ixn *Intention, q *WriteOptions) (*WriteMeta, error) {
+ r := c.c.newRequest("PUT", "/v1/connect/intentions/"+ixn.ID)
+ r.setWriteOptions(q)
+ r.obj = ixn
+ rtt, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{}
+ wm.RequestTime = rtt
+ return wm, nil
+}
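A minimal sketch of creating an intention and then checking its effect (not part of the vendored file), using only the types and methods defined above; `client` setup follows the earlier sketch.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	conn := client.Connect()

	// ID must be left empty; Consul generates and returns it.
	id, _, err := conn.IntentionCreate(&api.Intention{
		SourceName:      "web",
		DestinationName: "db",
		SourceType:      api.IntentionSourceConsul,
		Action:          api.IntentionActionAllow,
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created intention:", id)

	// Evaluate whether web -> db connections are currently allowed.
	allowed, _, err := conn.IntentionCheck(&api.IntentionCheck{
		Source:      "web",
		Destination: "db",
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("allowed:", allowed)
}
```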
diff --git a/vendor/github.com/hashicorp/consul/api/coordinate.go b/vendor/github.com/hashicorp/consul/api/coordinate.go
new file mode 100644
index 0000000..776630f
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/coordinate.go
@@ -0,0 +1,106 @@
+package api
+
+import (
+ "github.com/hashicorp/serf/coordinate"
+)
+
+// CoordinateEntry represents a node and its associated network coordinate.
+type CoordinateEntry struct {
+ Node string
+ Segment string
+ Coord *coordinate.Coordinate
+}
+
+// CoordinateDatacenterMap has the coordinates for servers in a given datacenter
+// and area. Network coordinates are only compatible within the same area.
+type CoordinateDatacenterMap struct {
+ Datacenter string
+ AreaID string
+ Coordinates []CoordinateEntry
+}
+
+// Coordinate can be used to query the coordinate endpoints
+type Coordinate struct {
+ c *Client
+}
+
+// Coordinate returns a handle to the coordinate endpoints
+func (c *Client) Coordinate() *Coordinate {
+ return &Coordinate{c}
+}
+
+// Datacenters is used to return the coordinates of all the servers in the WAN
+// pool.
+func (c *Coordinate) Datacenters() ([]*CoordinateDatacenterMap, error) {
+ r := c.c.newRequest("GET", "/v1/coordinate/datacenters")
+ _, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var out []*CoordinateDatacenterMap
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// Nodes is used to return the coordinates of all the nodes in the LAN pool.
+func (c *Coordinate) Nodes(q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) {
+ r := c.c.newRequest("GET", "/v1/coordinate/nodes")
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out []*CoordinateEntry
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
+
+// Update inserts or updates the LAN coordinate of a node.
+func (c *Coordinate) Update(coord *CoordinateEntry, q *WriteOptions) (*WriteMeta, error) {
+ r := c.c.newRequest("PUT", "/v1/coordinate/update")
+ r.setWriteOptions(q)
+ r.obj = coord
+ rtt, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{}
+ wm.RequestTime = rtt
+
+ return wm, nil
+}
+
+// Node is used to return the coordinates of a single node in the LAN pool.
+func (c *Coordinate) Node(node string, q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) {
+ r := c.c.newRequest("GET", "/v1/coordinate/node/"+node)
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out []*CoordinateEntry
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
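Because the entries carry serf network coordinates, round-trip time between two nodes can be estimated client-side. A minimal sketch (not part of the vendored file), assuming the `DistanceTo` method on `github.com/hashicorp/serf/coordinate.Coordinate` and the usual `client` setup:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	entries, _, err := client.Coordinate().Nodes(nil)
	if err != nil {
		log.Fatal(err)
	}

	// Estimate the RTT between the first two nodes from their coordinates.
	if len(entries) >= 2 && entries[0].Coord != nil && entries[1].Coord != nil {
		rtt := entries[0].Coord.DistanceTo(entries[1].Coord)
		fmt.Printf("estimated RTT %s <-> %s: %v\n",
			entries[0].Node, entries[1].Node, rtt)
	}
}
```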
diff --git a/vendor/github.com/hashicorp/consul/api/debug.go b/vendor/github.com/hashicorp/consul/api/debug.go
new file mode 100644
index 0000000..2380468
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/debug.go
@@ -0,0 +1,106 @@
+package api
+
+import (
+ "fmt"
+ "io/ioutil"
+ "strconv"
+)
+
+// Debug can be used to query the /debug/pprof endpoints to gather
+// profiling information about the target agent.
+//
+// The agent must have enable_debug set to true for profiling to be enabled
+// and for these endpoints to function.
+type Debug struct {
+ c *Client
+}
+
+// Debug returns a handle that exposes the internal debug endpoints.
+func (c *Client) Debug() *Debug {
+ return &Debug{c}
+}
+
+// Heap returns a pprof heap dump
+func (d *Debug) Heap() ([]byte, error) {
+ r := d.c.newRequest("GET", "/debug/pprof/heap")
+ _, resp, err := d.c.doRequest(r)
+ if err != nil {
+ return nil, fmt.Errorf("error making request: %s", err)
+ }
+ defer resp.Body.Close()
+
+ // We return a raw response because we're just passing through a response
+ // from the pprof handlers
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("error decoding body: %s", err)
+ }
+
+ return body, nil
+}
+
+// Profile returns a pprof CPU profile for the specified number of seconds
+func (d *Debug) Profile(seconds int) ([]byte, error) {
+ r := d.c.newRequest("GET", "/debug/pprof/profile")
+
+ // Capture a profile for the specified number of seconds
+ r.params.Set("seconds", strconv.Itoa(seconds))
+
+ _, resp, err := d.c.doRequest(r)
+ if err != nil {
+ return nil, fmt.Errorf("error making request: %s", err)
+ }
+ defer resp.Body.Close()
+
+ // We return a raw response because we're just passing through a response
+ // from the pprof handlers
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("error decoding body: %s", err)
+ }
+
+ return body, nil
+}
+
+// Trace returns an execution trace
+func (d *Debug) Trace(seconds int) ([]byte, error) {
+ r := d.c.newRequest("GET", "/debug/pprof/trace")
+
+ // Capture a trace for the specified number of seconds
+ r.params.Set("seconds", strconv.Itoa(seconds))
+
+ _, resp, err := d.c.doRequest(r)
+ if err != nil {
+ return nil, fmt.Errorf("error making request: %s", err)
+ }
+ defer resp.Body.Close()
+
+ // We return a raw response because we're just passing through a response
+ // from the pprof handlers
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("error decoding body: %s", err)
+ }
+
+ return body, nil
+}
+
+// Goroutine returns a pprof goroutine profile
+func (d *Debug) Goroutine() ([]byte, error) {
+ r := d.c.newRequest("GET", "/debug/pprof/goroutine")
+
+ _, resp, err := d.c.doRequest(r)
+ if err != nil {
+ return nil, fmt.Errorf("error making request: %s", err)
+ }
+ defer resp.Body.Close()
+
+ // We return a raw response because we're just passing through a response
+ // from the pprof handlers
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("error decoding body: %s", err)
+ }
+
+ return body, nil
+}
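A minimal sketch of capturing a CPU profile to disk (not part of the vendored file), assuming the agent was started with `enable_debug = true`; the output file name is illustrative.

```go
package main

import (
	"io/ioutil"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Sample CPU usage for 10 seconds.
	profile, err := client.Debug().Profile(10)
	if err != nil {
		log.Fatal(err)
	}

	// The bytes are a standard pprof profile, inspectable with `go tool pprof`.
	if err := ioutil.WriteFile("consul-cpu.prof", profile, 0644); err != nil {
		log.Fatal(err)
	}
}
```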
diff --git a/vendor/github.com/hashicorp/consul/api/discovery_chain.go b/vendor/github.com/hashicorp/consul/api/discovery_chain.go
new file mode 100644
index 0000000..75fdbae
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/discovery_chain.go
@@ -0,0 +1,229 @@
+package api
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+)
+
+// DiscoveryChain can be used to query the discovery-chain endpoints
+type DiscoveryChain struct {
+ c *Client
+}
+
+// DiscoveryChain returns a handle to the discovery-chain endpoints
+func (c *Client) DiscoveryChain() *DiscoveryChain {
+ return &DiscoveryChain{c}
+}
+
+func (d *DiscoveryChain) Get(name string, opts *DiscoveryChainOptions, q *QueryOptions) (*DiscoveryChainResponse, *QueryMeta, error) {
+ if name == "" {
+ return nil, nil, fmt.Errorf("Name parameter must not be empty")
+ }
+
+ method := "GET"
+ if opts != nil && opts.requiresPOST() {
+ method = "POST"
+ }
+
+ r := d.c.newRequest(method, fmt.Sprintf("/v1/discovery-chain/%s", name))
+ r.setQueryOptions(q)
+
+ if opts != nil {
+ if opts.EvaluateInDatacenter != "" {
+ r.params.Set("compile-dc", opts.EvaluateInDatacenter)
+ }
+ }
+
+ if method == "POST" {
+ r.obj = opts
+ }
+
+ rtt, resp, err := requireOK(d.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out DiscoveryChainResponse
+
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+
+ return &out, qm, nil
+}
+
+type DiscoveryChainOptions struct {
+ EvaluateInDatacenter string `json:"-"`
+
+ // OverrideMeshGateway allows for the mesh gateway setting to be overridden
+ // for any resolver in the compiled chain.
+ OverrideMeshGateway MeshGatewayConfig `json:",omitempty"`
+
+ // OverrideProtocol allows for the final protocol for the chain to be
+ // altered.
+ //
+ // - If the chain ordinarily would be TCP and an L7 protocol is passed here
+ // the chain will not include Routers or Splitters.
+ //
+ // - If the chain ordinarily would be L7 and TCP is passed here the chain
+ // will not include Routers or Splitters.
+ OverrideProtocol string `json:",omitempty"`
+
+ // OverrideConnectTimeout allows for the ConnectTimeout setting to be
+ // overridden for any resolver in the compiled chain.
+ OverrideConnectTimeout time.Duration `json:",omitempty"`
+}
+
+func (o *DiscoveryChainOptions) requiresPOST() bool {
+ if o == nil {
+ return false
+ }
+ return o.OverrideMeshGateway.Mode != "" ||
+ o.OverrideProtocol != "" ||
+ o.OverrideConnectTimeout != 0
+}
+
+type DiscoveryChainResponse struct {
+ Chain *CompiledDiscoveryChain
+}
+
+type CompiledDiscoveryChain struct {
+ ServiceName string
+ Namespace string
+ Datacenter string
+
+ // CustomizationHash is a unique hash of any data that affects the
+ // compilation of the discovery chain other than config entries or the
+ // name/namespace/datacenter evaluation criteria.
+ //
+ // If set, this value should be used to prefix/suffix any generated load
+ // balancer data plane objects to avoid sharing customized and
+ // non-customized versions.
+ CustomizationHash string
+
+ // Protocol is the overall protocol shared by everything in the chain.
+ Protocol string
+
+ // StartNode is the first key into the Nodes map that should be followed
+ // when walking the discovery chain.
+ StartNode string
+
+ // Nodes contains all nodes available for traversal in the chain keyed by a
+ // unique name. You can walk this by starting with StartNode.
+ //
+ // NOTE: The names should be treated as opaque values and are only
+ // guaranteed to be consistent within a single compilation.
+ Nodes map[string]*DiscoveryGraphNode
+
+ // Targets is a list of all targets used in this chain.
+ //
+ // NOTE: The names should be treated as opaque values and are only
+ // guaranteed to be consistent within a single compilation.
+ Targets map[string]*DiscoveryTarget
+}
+
+const (
+ DiscoveryGraphNodeTypeRouter = "router"
+ DiscoveryGraphNodeTypeSplitter = "splitter"
+ DiscoveryGraphNodeTypeResolver = "resolver"
+)
+
+// DiscoveryGraphNode is a single node in the compiled discovery chain.
+type DiscoveryGraphNode struct {
+ Type string
+ Name string // this is NOT necessarily a service
+
+ // fields for Type==router
+ Routes []*DiscoveryRoute
+
+ // fields for Type==splitter
+ Splits []*DiscoverySplit
+
+ // fields for Type==resolver
+ Resolver *DiscoveryResolver
+}
+
+// compiled form of ServiceRoute
+type DiscoveryRoute struct {
+ Definition *ServiceRoute
+ NextNode string
+}
+
+// compiled form of ServiceSplit
+type DiscoverySplit struct {
+ Weight float32
+ NextNode string
+}
+
+// compiled form of ServiceResolverConfigEntry
+type DiscoveryResolver struct {
+ Default bool
+ ConnectTimeout time.Duration
+ Target string
+ Failover *DiscoveryFailover
+}
+
+func (r *DiscoveryResolver) MarshalJSON() ([]byte, error) {
+ type Alias DiscoveryResolver
+ exported := &struct {
+ ConnectTimeout string `json:",omitempty"`
+ *Alias
+ }{
+ ConnectTimeout: r.ConnectTimeout.String(),
+ Alias: (*Alias)(r),
+ }
+ if r.ConnectTimeout == 0 {
+ exported.ConnectTimeout = ""
+ }
+
+ return json.Marshal(exported)
+}
+
+func (r *DiscoveryResolver) UnmarshalJSON(data []byte) error {
+ type Alias DiscoveryResolver
+ aux := &struct {
+ ConnectTimeout string
+ *Alias
+ }{
+ Alias: (*Alias)(r),
+ }
+ if err := json.Unmarshal(data, &aux); err != nil {
+ return err
+ }
+ var err error
+ if aux.ConnectTimeout != "" {
+ if r.ConnectTimeout, err = time.ParseDuration(aux.ConnectTimeout); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// compiled form of ServiceResolverFailover
+type DiscoveryFailover struct {
+ Targets []string
+}
+
+// DiscoveryTarget represents all of the inputs necessary to use a resolver
+// config entry to execute a catalog query to generate a list of service
+// instances during discovery.
+type DiscoveryTarget struct {
+ ID string
+
+ Service string
+ ServiceSubset string
+ Namespace string
+ Datacenter string
+
+ MeshGateway MeshGatewayConfig
+ Subset ServiceResolverSubset
+ External bool
+ SNI string
+ Name string
+}
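A minimal sketch of walking a compiled chain from `StartNode` down to a resolver (not part of the vendored file); for brevity it follows only the first route or split at each hop, so it ignores alternative branches a real consumer would have to handle.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	resp, _, err := client.DiscoveryChain().Get("web", nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	chain := resp.Chain

	// Walk from StartNode down to a resolver, following only the first
	// route or split at each hop.
	node := chain.Nodes[chain.StartNode]
	for node != nil && node.Type != api.DiscoveryGraphNodeTypeResolver {
		switch node.Type {
		case api.DiscoveryGraphNodeTypeRouter:
			node = chain.Nodes[node.Routes[0].NextNode]
		case api.DiscoveryGraphNodeTypeSplitter:
			node = chain.Nodes[node.Splits[0].NextNode]
		}
	}

	if node != nil && node.Resolver != nil {
		if target := chain.Targets[node.Resolver.Target]; target != nil {
			fmt.Printf("resolved to %s in %s (timeout %v)\n",
				target.Service, target.Datacenter, node.Resolver.ConnectTimeout)
		}
	}
}
```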
diff --git a/vendor/github.com/hashicorp/consul/api/event.go b/vendor/github.com/hashicorp/consul/api/event.go
new file mode 100644
index 0000000..85b5b06
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/event.go
@@ -0,0 +1,104 @@
+package api
+
+import (
+ "bytes"
+ "strconv"
+)
+
+// Event can be used to query the Event endpoints
+type Event struct {
+ c *Client
+}
+
+// UserEvent represents an event that was fired by the user
+type UserEvent struct {
+ ID string
+ Name string
+ Payload []byte
+ NodeFilter string
+ ServiceFilter string
+ TagFilter string
+ Version int
+ LTime uint64
+}
+
+// Event returns a handle to the event endpoints
+func (c *Client) Event() *Event {
+ return &Event{c}
+}
+
+// Fire is used to fire a new user event. Only the Name, Payload and Filters
+// are respected. This returns the ID or an associated error. Cross DC requests
+// are supported.
+func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) {
+ r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name)
+ r.setWriteOptions(q)
+ if params.NodeFilter != "" {
+ r.params.Set("node", params.NodeFilter)
+ }
+ if params.ServiceFilter != "" {
+ r.params.Set("service", params.ServiceFilter)
+ }
+ if params.TagFilter != "" {
+ r.params.Set("tag", params.TagFilter)
+ }
+ if params.Payload != nil {
+ r.body = bytes.NewReader(params.Payload)
+ }
+
+ rtt, resp, err := requireOK(e.c.doRequest(r))
+ if err != nil {
+ return "", nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ var out UserEvent
+ if err := decodeBody(resp, &out); err != nil {
+ return "", nil, err
+ }
+ return out.ID, wm, nil
+}
+
+// List is used to get the most recent events an agent has received.
+// This list can be optionally filtered by the name. This endpoint supports
+// quasi-blocking queries. The index is not monotonic, nor does it provide
+// LastContact or KnownLeader.
+func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) {
+ r := e.c.newRequest("GET", "/v1/event/list")
+ r.setQueryOptions(q)
+ if name != "" {
+ r.params.Set("name", name)
+ }
+ rtt, resp, err := requireOK(e.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var entries []*UserEvent
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, err
+ }
+ return entries, qm, nil
+}
+
+// IDToIndex is a bit of a hack. This simulates the index generation to
+// convert an event ID into a WaitIndex.
+func (e *Event) IDToIndex(uuid string) uint64 {
+ lower := uuid[0:8] + uuid[9:13] + uuid[14:18]
+ upper := uuid[19:23] + uuid[24:36]
+ lowVal, err := strconv.ParseUint(lower, 16, 64)
+ if err != nil {
+ panic("Failed to convert " + lower)
+ }
+ highVal, err := strconv.ParseUint(upper, 16, 64)
+ if err != nil {
+ panic("Failed to convert " + upper)
+ }
+ return lowVal ^ highVal
+}
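A minimal sketch of firing an event and then using `IDToIndex` for a quasi-blocking follow-up (not part of the vendored file); it assumes the `WaitIndex` and `WaitTime` fields on `QueryOptions`, defined elsewhere in this package.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	ev := client.Event()

	id, _, err := ev.Fire(&api.UserEvent{
		Name:    "deploy",
		Payload: []byte("v1.2.3"),
	}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Convert the event ID into a synthetic wait index, then block until a
	// newer event arrives or the wait times out.
	events, _, err := ev.List("deploy", &api.QueryOptions{
		WaitIndex: ev.IDToIndex(id),
		WaitTime:  30 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range events {
		fmt.Println(e.Name, string(e.Payload))
	}
}
```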
diff --git a/vendor/github.com/hashicorp/consul/api/go.mod b/vendor/github.com/hashicorp/consul/api/go.mod
new file mode 100644
index 0000000..d9902d4
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/go.mod
@@ -0,0 +1,16 @@
+module github.com/hashicorp/consul/api
+
+go 1.12
+
+replace github.com/hashicorp/consul/sdk => ../sdk
+
+require (
+ github.com/hashicorp/consul/sdk v0.6.0
+ github.com/hashicorp/go-cleanhttp v0.5.1
+ github.com/hashicorp/go-hclog v0.12.0
+ github.com/hashicorp/go-rootcerts v1.0.2
+ github.com/hashicorp/go-uuid v1.0.1
+ github.com/hashicorp/serf v0.9.3
+ github.com/mitchellh/mapstructure v1.1.2
+ github.com/stretchr/testify v1.4.0
+)
diff --git a/vendor/github.com/hashicorp/consul/api/go.sum b/vendor/github.com/hashicorp/consul/api/go.sum
new file mode 100644
index 0000000..3c26420
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/go.sum
@@ -0,0 +1,131 @@
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
+github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-hclog v0.12.0 h1:d4QkX8FRTYaKaCZBoXYY8zJX2BXjWxurN/GA2tkrmZM=
+github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
+github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
+github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
+github.com/hashicorp/memberlist v0.2.2 h1:5+RffWKwqJ71YPu9mWsF7ZOscZmwfasdA8kbdC7AO2g=
+github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
+github.com/hashicorp/serf v0.9.3 h1:AVF6JDQQens6nMHT9OGERBvK0f8rPrAGILnsKLr6lzM=
+github.com/hashicorp/serf v0.9.3/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
+github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
+github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
+github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/dns v1.1.26 h1:gPxPSwALAeHJSjarOs00QjVdV9QoBvc1D2ujQUr5BzU=
+github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
+github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3 h1:KYQXGkl6vs02hK7pK4eIbw0NpNPedieTSTEiJ//bwGs=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 h1:ACG4HJsFiNMf47Y4PeRoebLNy/2lXT9EtprMuTFWt1M=
+golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
+golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5 h1:x6r4Jo0KNzOOzYd8lbcRsqjuqEASK6ob3auvWYM4/8U=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU=
+golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9 h1:1/DFK4b7JH8DmkqhUk48onnSfrPzImPoVxuomtbT2nk=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go
new file mode 100644
index 0000000..99b9ac2
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/health.go
@@ -0,0 +1,375 @@
+package api
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+ "time"
+)
+
+const (
+ // HealthAny is special, and is used as a wildcard,
+ // not as a specific state.
+ HealthAny = "any"
+ HealthPassing = "passing"
+ HealthWarning = "warning"
+ HealthCritical = "critical"
+ HealthMaint = "maintenance"
+)
+
+const (
+ serviceHealth = "service"
+ connectHealth = "connect"
+ ingressHealth = "ingress"
+)
+
+const (
+ // NodeMaint is the special key set by a node in maintenance mode.
+ NodeMaint = "_node_maintenance"
+
+ // ServiceMaintPrefix is the prefix for a service in maintenance mode.
+ ServiceMaintPrefix = "_service_maintenance:"
+)
+
+// HealthCheck is used to represent a single check
+type HealthCheck struct {
+ Node string
+ CheckID string
+ Name string
+ Status string
+ Notes string
+ Output string
+ ServiceID string
+ ServiceName string
+ ServiceTags []string
+ Type string
+ Namespace string `json:",omitempty"`
+
+ Definition HealthCheckDefinition
+
+ CreateIndex uint64
+ ModifyIndex uint64
+}
+
+// HealthCheckDefinition is used to store the details about
+// a health check's execution.
+type HealthCheckDefinition struct {
+ HTTP string
+ Header map[string][]string
+ Method string
+ Body string
+ TLSSkipVerify bool
+ TCP string
+ IntervalDuration time.Duration `json:"-"`
+ TimeoutDuration time.Duration `json:"-"`
+ DeregisterCriticalServiceAfterDuration time.Duration `json:"-"`
+
+ // DEPRECATED in Consul 1.4.1. Use the above time.Duration fields instead.
+ Interval ReadableDuration
+ Timeout ReadableDuration
+ DeregisterCriticalServiceAfter ReadableDuration
+}
+
+func (d *HealthCheckDefinition) MarshalJSON() ([]byte, error) {
+ type Alias HealthCheckDefinition
+ out := &struct {
+ Interval string
+ Timeout string
+ DeregisterCriticalServiceAfter string
+ *Alias
+ }{
+ Interval: d.Interval.String(),
+ Timeout: d.Timeout.String(),
+ DeregisterCriticalServiceAfter: d.DeregisterCriticalServiceAfter.String(),
+ Alias: (*Alias)(d),
+ }
+
+ if d.IntervalDuration != 0 {
+ out.Interval = d.IntervalDuration.String()
+ } else if d.Interval != 0 {
+ out.Interval = d.Interval.String()
+ }
+ if d.TimeoutDuration != 0 {
+ out.Timeout = d.TimeoutDuration.String()
+ } else if d.Timeout != 0 {
+ out.Timeout = d.Timeout.String()
+ }
+ if d.DeregisterCriticalServiceAfterDuration != 0 {
+ out.DeregisterCriticalServiceAfter = d.DeregisterCriticalServiceAfterDuration.String()
+ } else if d.DeregisterCriticalServiceAfter != 0 {
+ out.DeregisterCriticalServiceAfter = d.DeregisterCriticalServiceAfter.String()
+ }
+
+ return json.Marshal(out)
+}
+
+func (t *HealthCheckDefinition) UnmarshalJSON(data []byte) (err error) {
+ type Alias HealthCheckDefinition
+ aux := &struct {
+ IntervalDuration interface{}
+ TimeoutDuration interface{}
+ DeregisterCriticalServiceAfterDuration interface{}
+ *Alias
+ }{
+ Alias: (*Alias)(t),
+ }
+ if err := json.Unmarshal(data, &aux); err != nil {
+ return err
+ }
+
+ // Parse the values into both the time.Duration and old ReadableDuration fields.
+
+ if aux.IntervalDuration == nil {
+ t.IntervalDuration = time.Duration(t.Interval)
+ } else {
+ switch v := aux.IntervalDuration.(type) {
+ case string:
+ if t.IntervalDuration, err = time.ParseDuration(v); err != nil {
+ return err
+ }
+ case float64:
+ t.IntervalDuration = time.Duration(v)
+ }
+ t.Interval = ReadableDuration(t.IntervalDuration)
+ }
+
+ if aux.TimeoutDuration == nil {
+ t.TimeoutDuration = time.Duration(t.Timeout)
+ } else {
+ switch v := aux.TimeoutDuration.(type) {
+ case string:
+ if t.TimeoutDuration, err = time.ParseDuration(v); err != nil {
+ return err
+ }
+ case float64:
+ t.TimeoutDuration = time.Duration(v)
+ }
+ t.Timeout = ReadableDuration(t.TimeoutDuration)
+ }
+ if aux.DeregisterCriticalServiceAfterDuration == nil {
+ t.DeregisterCriticalServiceAfterDuration = time.Duration(t.DeregisterCriticalServiceAfter)
+ } else {
+ switch v := aux.DeregisterCriticalServiceAfterDuration.(type) {
+ case string:
+ if t.DeregisterCriticalServiceAfterDuration, err = time.ParseDuration(v); err != nil {
+ return err
+ }
+ case float64:
+ t.DeregisterCriticalServiceAfterDuration = time.Duration(v)
+ }
+ t.DeregisterCriticalServiceAfter = ReadableDuration(t.DeregisterCriticalServiceAfterDuration)
+ }
+
+ return nil
+}
+
+// HealthChecks is a collection of HealthCheck structs.
+type HealthChecks []*HealthCheck
+
+// AggregatedStatus returns the "best" status for the list of health checks.
+// Because a given entry may have many service and node-level health checks
+// attached, this function determines the best representative of the status as
+// a single string using the following heuristic:
+//
+// maintenance > critical > warning > passing
+//
+func (c HealthChecks) AggregatedStatus() string {
+ var passing, warning, critical, maintenance bool
+ for _, check := range c {
+ id := check.CheckID
+ if id == NodeMaint || strings.HasPrefix(id, ServiceMaintPrefix) {
+ maintenance = true
+ continue
+ }
+
+ switch check.Status {
+ case HealthPassing:
+ passing = true
+ case HealthWarning:
+ warning = true
+ case HealthCritical:
+ critical = true
+ default:
+ return ""
+ }
+ }
+
+ switch {
+ case maintenance:
+ return HealthMaint
+ case critical:
+ return HealthCritical
+ case warning:
+ return HealthWarning
+ case passing:
+ return HealthPassing
+ default:
+ return HealthPassing
+ }
+}
+
+// ServiceEntry is used for the health service endpoint
+type ServiceEntry struct {
+ Node *Node
+ Service *AgentService
+ Checks HealthChecks
+}
+
+// Health can be used to query the Health endpoints
+type Health struct {
+ c *Client
+}
+
+// Health returns a handle to the health endpoints
+func (c *Client) Health() *Health {
+ return &Health{c}
+}
+
+// Node is used to query for checks belonging to a given node
+func (h *Health) Node(node string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
+ r := h.c.newRequest("GET", "/v1/health/node/"+node)
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(h.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out HealthChecks
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
+
+// Checks is used to return the checks associated with a service
+func (h *Health) Checks(service string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
+ r := h.c.newRequest("GET", "/v1/health/checks/"+service)
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(h.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out HealthChecks
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
+
+// Service is used to query health information along with service info
+// for a given service. It can optionally do server-side filtering on a tag
+// or nodes with passing health checks only.
+func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+ var tags []string
+ if tag != "" {
+ tags = []string{tag}
+ }
+ return h.service(service, tags, passingOnly, q, serviceHealth)
+}
+
+func (h *Health) ServiceMultipleTags(service string, tags []string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+ return h.service(service, tags, passingOnly, q, serviceHealth)
+}
+
+// Connect is equivalent to Service except that it will only return services
+// which are Connect-enabled and will return the connection address for Connect
+// clients to use, which may be a proxy in front of the named service. If
+// passingOnly is true only instances where both the service and any proxy are
+// healthy will be returned.
+func (h *Health) Connect(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+ var tags []string
+ if tag != "" {
+ tags = []string{tag}
+ }
+ return h.service(service, tags, passingOnly, q, connectHealth)
+}
+
+func (h *Health) ConnectMultipleTags(service string, tags []string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+ return h.service(service, tags, passingOnly, q, connectHealth)
+}
+
+// Ingress is equivalent to Connect except that it will only return associated
+// ingress gateways for the requested service.
+func (h *Health) Ingress(service string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+ var tags []string
+ return h.service(service, tags, passingOnly, q, ingressHealth)
+}
+
+func (h *Health) service(service string, tags []string, passingOnly bool, q *QueryOptions, healthType string) ([]*ServiceEntry, *QueryMeta, error) {
+ var path string
+ switch healthType {
+ case connectHealth:
+ path = "/v1/health/connect/" + service
+ case ingressHealth:
+ path = "/v1/health/ingress/" + service
+ default:
+ path = "/v1/health/service/" + service
+ }
+
+ r := h.c.newRequest("GET", path)
+ r.setQueryOptions(q)
+ if len(tags) > 0 {
+ for _, tag := range tags {
+ r.params.Add("tag", tag)
+ }
+ }
+ if passingOnly {
+ r.params.Set(HealthPassing, "1")
+ }
+ rtt, resp, err := requireOK(h.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out []*ServiceEntry
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
+
+// State is used to retrieve all the checks in a given state.
+// The wildcard "any" state can also be used for all checks.
+func (h *Health) State(state string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
+ switch state {
+ case HealthAny:
+ case HealthWarning:
+ case HealthCritical:
+ case HealthPassing:
+ default:
+ return nil, nil, fmt.Errorf("Unsupported state: %v", state)
+ }
+ r := h.c.newRequest("GET", "/v1/health/state/"+state)
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(h.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ var out HealthChecks
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
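+
+For orientation, here is a minimal sketch of how a consumer might call the
+Health endpoints above. It assumes a Consul agent reachable at the default
+local address and a registered service named "web"; both are hypothetical.
+
+```go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "github.com/hashicorp/consul/api"
+)
+
+func main() {
+ client, err := api.NewClient(api.DefaultConfig())
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // passingOnly=true restricts results to instances whose checks all pass.
+ entries, _, err := client.Health().Service("web", "", true, nil)
+ if err != nil {
+ log.Fatal(err)
+ }
+ for _, e := range entries {
+ fmt.Println(e.Service.Address, e.Service.Port, e.Checks.AggregatedStatus())
+ }
+}
+```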
diff --git a/vendor/github.com/hashicorp/consul/api/kv.go b/vendor/github.com/hashicorp/consul/api/kv.go
new file mode 100644
index 0000000..351d287
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/kv.go
@@ -0,0 +1,290 @@
+package api
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+ "strconv"
+ "strings"
+)
+
+// KVPair is used to represent a single K/V entry
+type KVPair struct {
+ // Key is the name of the key. It is also part of the URL path when accessed
+ // via the API.
+ Key string
+
+ // CreateIndex holds the index corresponding to the creation of this KVPair. This
+ // is a read-only field.
+ CreateIndex uint64
+
+ // ModifyIndex is used for the Check-And-Set operations and can also be fed
+ // back into the WaitIndex of the QueryOptions in order to perform blocking
+ // queries.
+ ModifyIndex uint64
+
+ // LockIndex holds the index corresponding to a lock on this key, if any. This
+ // is a read-only field.
+ LockIndex uint64
+
+ // Flags are any user-defined flags on the key. It is up to the implementer
+ // to check these values, since Consul does not treat them specially.
+ Flags uint64
+
+ // Value is the value for the key. This can be any value, but it will be
+ // base64 encoded upon transport.
+ Value []byte
+
+ // Session is a string representing the ID of the session. Any other
+ // interactions with this key over the same session must specify the same
+ // session ID.
+ Session string
+
+ // Namespace is the namespace the KVPair is associated with
+ // Namespacing is a Consul Enterprise feature.
+ Namespace string `json:",omitempty"`
+}
+
+// KVPairs is a list of KVPair objects
+type KVPairs []*KVPair
+
+// KV is used to manipulate the K/V API
+type KV struct {
+ c *Client
+}
+
+// KV is used to return a handle to the K/V APIs
+func (c *Client) KV() *KV {
+ return &KV{c}
+}
+
+// Get is used to lookup a single key. The returned pointer
+// to the KVPair will be nil if the key does not exist.
+func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) {
+ resp, qm, err := k.getInternal(key, nil, q)
+ if err != nil {
+ return nil, nil, err
+ }
+ if resp == nil {
+ return nil, qm, nil
+ }
+ defer resp.Body.Close()
+
+ var entries []*KVPair
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, err
+ }
+ if len(entries) > 0 {
+ return entries[0], qm, nil
+ }
+ return nil, qm, nil
+}
+
+// List is used to lookup all keys under a prefix
+func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) {
+ resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q)
+ if err != nil {
+ return nil, nil, err
+ }
+ if resp == nil {
+ return nil, qm, nil
+ }
+ defer resp.Body.Close()
+
+ var entries []*KVPair
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, err
+ }
+ return entries, qm, nil
+}
+
+// Keys is used to list all the keys under a prefix. Optionally,
+// a separator can be used to limit the responses.
+func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) {
+ params := map[string]string{"keys": ""}
+ if separator != "" {
+ params["separator"] = separator
+ }
+ resp, qm, err := k.getInternal(prefix, params, q)
+ if err != nil {
+ return nil, nil, err
+ }
+ if resp == nil {
+ return nil, qm, nil
+ }
+ defer resp.Body.Close()
+
+ var entries []string
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, err
+ }
+ return entries, qm, nil
+}
+
+func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) {
+ r := k.c.newRequest("GET", "/v1/kv/"+strings.TrimPrefix(key, "/"))
+ r.setQueryOptions(q)
+ for param, val := range params {
+ r.params.Set(param, val)
+ }
+ rtt, resp, err := k.c.doRequest(r)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ if resp.StatusCode == 404 {
+ resp.Body.Close()
+ return nil, qm, nil
+ } else if resp.StatusCode != 200 {
+ resp.Body.Close()
+ return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
+ }
+ return resp, qm, nil
+}
+
+// Put is used to write a new value. Only the
+// Key, Flags and Value are respected.
+func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) {
+ params := make(map[string]string, 1)
+ if p.Flags != 0 {
+ params["flags"] = strconv.FormatUint(p.Flags, 10)
+ }
+ _, wm, err := k.put(p.Key, params, p.Value, q)
+ return wm, err
+}
+
+// CAS is used for a Check-And-Set operation. The Key,
+// ModifyIndex, Flags and Value are respected. Returns true
+// on success or false on failure.
+func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+ params := make(map[string]string, 2)
+ if p.Flags != 0 {
+ params["flags"] = strconv.FormatUint(p.Flags, 10)
+ }
+ params["cas"] = strconv.FormatUint(p.ModifyIndex, 10)
+ return k.put(p.Key, params, p.Value, q)
+}
+
+// Acquire is used for a lock acquisition operation. The Key,
+// Flags, Value and Session are respected. Returns true
+// on success or false on failure.
+func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+ params := make(map[string]string, 2)
+ if p.Flags != 0 {
+ params["flags"] = strconv.FormatUint(p.Flags, 10)
+ }
+ params["acquire"] = p.Session
+ return k.put(p.Key, params, p.Value, q)
+}
+
+// Release is used for a lock release operation. The Key,
+// Flags, Value and Session are respected. Returns true
+// on success or false on failure.
+func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+ params := make(map[string]string, 2)
+ if p.Flags != 0 {
+ params["flags"] = strconv.FormatUint(p.Flags, 10)
+ }
+ params["release"] = p.Session
+ return k.put(p.Key, params, p.Value, q)
+}
+
+func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) {
+ if len(key) > 0 && key[0] == '/' {
+ return false, nil, fmt.Errorf("Invalid key. Key must not begin with a '/': %s", key)
+ }
+
+ r := k.c.newRequest("PUT", "/v1/kv/"+key)
+ r.setWriteOptions(q)
+ for param, val := range params {
+ r.params.Set(param, val)
+ }
+ r.body = bytes.NewReader(body)
+ rtt, resp, err := requireOK(k.c.doRequest(r))
+ if err != nil {
+ return false, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &WriteMeta{}
+ qm.RequestTime = rtt
+
+ var buf bytes.Buffer
+ if _, err := io.Copy(&buf, resp.Body); err != nil {
+ return false, nil, fmt.Errorf("Failed to read response: %v", err)
+ }
+ res := strings.Contains(buf.String(), "true")
+ return res, qm, nil
+}
+
+// Delete is used to delete a single key
+func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) {
+ _, qm, err := k.deleteInternal(key, nil, w)
+ return qm, err
+}
+
+// DeleteCAS is used for a Delete Check-And-Set operation. The Key
+// and ModifyIndex are respected. Returns true on success or false on failure.
+func (k *KV) DeleteCAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+ params := map[string]string{
+ "cas": strconv.FormatUint(p.ModifyIndex, 10),
+ }
+ return k.deleteInternal(p.Key, params, q)
+}
+
+// DeleteTree is used to delete all keys under a prefix
+func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) {
+ _, qm, err := k.deleteInternal(prefix, map[string]string{"recurse": ""}, w)
+ return qm, err
+}
+
+func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) {
+ r := k.c.newRequest("DELETE", "/v1/kv/"+strings.TrimPrefix(key, "/"))
+ r.setWriteOptions(q)
+ for param, val := range params {
+ r.params.Set(param, val)
+ }
+ rtt, resp, err := requireOK(k.c.doRequest(r))
+ if err != nil {
+ return false, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &WriteMeta{}
+ qm.RequestTime = rtt
+
+ var buf bytes.Buffer
+ if _, err := io.Copy(&buf, resp.Body); err != nil {
+ return false, nil, fmt.Errorf("Failed to read response: %v", err)
+ }
+ res := strings.Contains(buf.String(), "true")
+ return res, qm, nil
+}
+
+// The Txn function has been deprecated from the KV object; please see the Txn
+// object for more information about Transactions.
+func (k *KV) Txn(txn KVTxnOps, q *QueryOptions) (bool, *KVTxnResponse, *QueryMeta, error) {
+ var ops TxnOps
+ for _, op := range txn {
+ ops = append(ops, &TxnOp{KV: op})
+ }
+
+ respOk, txnResp, qm, err := k.c.txn(ops, q)
+ if err != nil {
+ return false, nil, nil, err
+ }
+
+ // Convert from the internal format.
+ kvResp := KVTxnResponse{
+ Errors: txnResp.Errors,
+ }
+ for _, result := range txnResp.Results {
+ kvResp.Results = append(kvResp.Results, result.KV)
+ }
+ return respOk, &kvResp, qm, nil
+}
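+
+The KV type above is a thin wrapper over the /v1/kv endpoints. A small
+sketch of the read/write round trip, reusing the hypothetical client from
+the previous example:
+
+```go
+kv := client.KV()
+
+// Put only respects Key, Flags and Value on the KVPair.
+pair := &api.KVPair{Key: "config/feature", Value: []byte("on")}
+if _, err := kv.Put(pair, nil); err != nil {
+ log.Fatal(err)
+}
+
+// Get returns a nil pair when the key does not exist.
+got, _, err := kv.Get("config/feature", nil)
+if err != nil {
+ log.Fatal(err)
+}
+if got != nil {
+ fmt.Printf("%s = %s\n", got.Key, got.Value)
+}
+```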
diff --git a/vendor/github.com/hashicorp/consul/api/lock.go b/vendor/github.com/hashicorp/consul/api/lock.go
new file mode 100644
index 0000000..5cacee8
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/lock.go
@@ -0,0 +1,406 @@
+package api
+
+import (
+ "fmt"
+ "sync"
+ "time"
+)
+
+const (
+ // DefaultLockSessionName is the Session Name we assign if none is provided
+ DefaultLockSessionName = "Consul API Lock"
+
+ // DefaultLockSessionTTL is the default session TTL if no Session is provided
+ // when creating a new Lock. This is used because we do not have any
+ // other check to depend upon.
+ DefaultLockSessionTTL = "15s"
+
+ // DefaultLockWaitTime is how long we block at a time while checking if lock
+ // acquisition is possible. This affects the minimum time it takes to cancel
+ // a Lock acquisition.
+ DefaultLockWaitTime = 15 * time.Second
+
+ // DefaultLockRetryTime is how long we wait after a failed lock acquisition
+ // before attempting to do the lock again. This is so that once a lock-delay
+ // is in effect, we do not hot loop retrying the acquisition.
+ DefaultLockRetryTime = 5 * time.Second
+
+ // DefaultMonitorRetryTime is how long we wait after a failed monitor check
+ // of a lock (500 response code). This allows the monitor to ride out brief
+ // periods of unavailability, subject to the MonitorRetries setting in the
+ // lock options which is by default set to 0, disabling this feature. This
+ // affects locks and semaphores.
+ DefaultMonitorRetryTime = 2 * time.Second
+
+ // LockFlagValue is a magic flag we set to indicate a key
+ // is being used for a lock. It is used to detect a potential
+ // conflict with a semaphore.
+ LockFlagValue = 0x2ddccbc058a50c18
+)
+
+var (
+ // ErrLockHeld is returned if we attempt to double lock
+ ErrLockHeld = fmt.Errorf("Lock already held")
+
+ // ErrLockNotHeld is returned if we attempt to unlock a lock
+ // that we do not hold.
+ ErrLockNotHeld = fmt.Errorf("Lock not held")
+
+ // ErrLockInUse is returned if we attempt to destroy a lock
+ // that is in use.
+ ErrLockInUse = fmt.Errorf("Lock in use")
+
+ // ErrLockConflict is returned if the flags on a key
+ // used for a lock do not match expectation
+ ErrLockConflict = fmt.Errorf("Existing key does not match lock use")
+)
+
+// Lock is used to implement client-side leader election. It follows the
+// algorithm as described here: https://www.consul.io/docs/guides/leader-election.html.
+type Lock struct {
+ c *Client
+ opts *LockOptions
+
+ isHeld bool
+ sessionRenew chan struct{}
+ lockSession string
+ l sync.Mutex
+}
+
+// LockOptions is used to parameterize the Lock behavior.
+type LockOptions struct {
+ Key string // Must be set and have write permissions
+ Value []byte // Optional, value to associate with the lock
+ Session string // Optional, created if not specified
+ SessionOpts *SessionEntry // Optional, options to use when creating a session
+ SessionName string // Optional, defaults to DefaultLockSessionName (ignored if SessionOpts is given)
+ SessionTTL string // Optional, defaults to DefaultLockSessionTTL (ignored if SessionOpts is given)
+ MonitorRetries int // Optional, defaults to 0 which means no retries
+ MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime
+ LockWaitTime time.Duration // Optional, defaults to DefaultLockWaitTime
+ LockTryOnce bool // Optional, defaults to false which means try forever
+ Namespace string `json:",omitempty"` // Optional, defaults to API client config, namespace of ACL token, or "default" namespace
+}
+
+// LockKey returns a handle to a lock struct which can be used
+// to acquire and release the mutex. The key used must have
+// write permissions.
+func (c *Client) LockKey(key string) (*Lock, error) {
+ opts := &LockOptions{
+ Key: key,
+ }
+ return c.LockOpts(opts)
+}
+
+// LockOpts returns a handle to a lock struct which can be used
+// to acquire and release the mutex. The key used must have
+// write permissions.
+func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) {
+ if opts.Key == "" {
+ return nil, fmt.Errorf("missing key")
+ }
+ if opts.SessionName == "" {
+ opts.SessionName = DefaultLockSessionName
+ }
+ if opts.SessionTTL == "" {
+ opts.SessionTTL = DefaultLockSessionTTL
+ } else {
+ if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
+ return nil, fmt.Errorf("invalid SessionTTL: %v", err)
+ }
+ }
+ if opts.MonitorRetryTime == 0 {
+ opts.MonitorRetryTime = DefaultMonitorRetryTime
+ }
+ if opts.LockWaitTime == 0 {
+ opts.LockWaitTime = DefaultLockWaitTime
+ }
+ l := &Lock{
+ c: c,
+ opts: opts,
+ }
+ return l, nil
+}
+
+// Lock attempts to acquire the lock and blocks while doing so.
+// Providing a non-nil stopCh can be used to abort the lock attempt.
+// It returns a channel that is closed if our lock is lost, or an error.
+// This channel could be closed at any time due to session invalidation,
+// communication errors, operator intervention, etc. It is NOT safe to
+// assume that the lock is held until Unlock() unless the Session is specifically
+// created without any associated health checks. By default Consul sessions
+// prefer liveness over safety and an application must be able to handle
+// the lock being lost.
+func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
+ // Hold the lock as we try to acquire
+ l.l.Lock()
+ defer l.l.Unlock()
+
+ // Check if we already hold the lock
+ if l.isHeld {
+ return nil, ErrLockHeld
+ }
+
+ wOpts := WriteOptions{
+ Namespace: l.opts.Namespace,
+ }
+
+ // Check if we need to create a session first
+ l.lockSession = l.opts.Session
+ if l.lockSession == "" {
+ s, err := l.createSession()
+ if err != nil {
+ return nil, fmt.Errorf("failed to create session: %v", err)
+ }
+
+ l.sessionRenew = make(chan struct{})
+ l.lockSession = s
+
+ session := l.c.Session()
+ go session.RenewPeriodic(l.opts.SessionTTL, s, &wOpts, l.sessionRenew)
+
+ // If we fail to acquire the lock, cleanup the session
+ defer func() {
+ if !l.isHeld {
+ close(l.sessionRenew)
+ l.sessionRenew = nil
+ }
+ }()
+ }
+
+ // Setup the query options
+ kv := l.c.KV()
+ qOpts := QueryOptions{
+ WaitTime: l.opts.LockWaitTime,
+ Namespace: l.opts.Namespace,
+ }
+
+ start := time.Now()
+ attempts := 0
+WAIT:
+ // Check if we should quit
+ select {
+ case <-stopCh:
+ return nil, nil
+ default:
+ }
+
+ // Handle the one-shot mode.
+ if l.opts.LockTryOnce && attempts > 0 {
+ elapsed := time.Since(start)
+ if elapsed > l.opts.LockWaitTime {
+ return nil, nil
+ }
+
+ // Query wait time should not exceed the lock wait time
+ qOpts.WaitTime = l.opts.LockWaitTime - elapsed
+ }
+ attempts++
+
+ // Look for an existing lock, blocking until not taken
+ pair, meta, err := kv.Get(l.opts.Key, &qOpts)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read lock: %v", err)
+ }
+ if pair != nil && pair.Flags != LockFlagValue {
+ return nil, ErrLockConflict
+ }
+ locked := false
+ if pair != nil && pair.Session == l.lockSession {
+ goto HELD
+ }
+ if pair != nil && pair.Session != "" {
+ qOpts.WaitIndex = meta.LastIndex
+ goto WAIT
+ }
+
+ // Try to acquire the lock
+ pair = l.lockEntry(l.lockSession)
+
+ locked, _, err = kv.Acquire(pair, &wOpts)
+ if err != nil {
+ return nil, fmt.Errorf("failed to acquire lock: %v", err)
+ }
+
+ // Handle the case of not getting the lock
+ if !locked {
+ // Determine why the lock failed
+ qOpts.WaitIndex = 0
+ pair, meta, err = kv.Get(l.opts.Key, &qOpts)
+ if err != nil {
+ return nil, err
+ }
+ if pair != nil && pair.Session != "" {
+ // If the session is not empty, this means that a wait can safely happen
+ // using a long poll
+ qOpts.WaitIndex = meta.LastIndex
+ goto WAIT
+ } else {
+ // If the session is empty and the lock failed to acquire, then it means
+ // a lock-delay is in effect and a timed wait must be used
+ select {
+ case <-time.After(DefaultLockRetryTime):
+ goto WAIT
+ case <-stopCh:
+ return nil, nil
+ }
+ }
+ }
+
+HELD:
+ // Watch to ensure we maintain leadership
+ leaderCh := make(chan struct{})
+ go l.monitorLock(l.lockSession, leaderCh)
+
+ // Set that we own the lock
+ l.isHeld = true
+
+ // Locked! All done
+ return leaderCh, nil
+}
+
+// Unlock releases the lock. It is an error to call this
+// if the lock is not currently held.
+func (l *Lock) Unlock() error {
+ // Hold the lock as we try to release
+ l.l.Lock()
+ defer l.l.Unlock()
+
+ // Ensure the lock is actually held
+ if !l.isHeld {
+ return ErrLockNotHeld
+ }
+
+ // Set that we no longer own the lock
+ l.isHeld = false
+
+ // Stop the session renew
+ if l.sessionRenew != nil {
+ defer func() {
+ close(l.sessionRenew)
+ l.sessionRenew = nil
+ }()
+ }
+
+ // Get the lock entry, and clear the lock session
+ lockEnt := l.lockEntry(l.lockSession)
+ l.lockSession = ""
+
+ // Release the lock explicitly
+ kv := l.c.KV()
+ w := WriteOptions{Namespace: l.opts.Namespace}
+
+ _, _, err := kv.Release(lockEnt, &w)
+ if err != nil {
+ return fmt.Errorf("failed to release lock: %v", err)
+ }
+ return nil
+}
+
+// Destroy is used to clean up the lock entry. It is not necessary
+// to invoke it. It will fail if the lock is in use.
+func (l *Lock) Destroy() error {
+ // Hold the lock as we try to release
+ l.l.Lock()
+ defer l.l.Unlock()
+
+ // Check if we already hold the lock
+ if l.isHeld {
+ return ErrLockHeld
+ }
+
+ // Look for an existing lock
+ kv := l.c.KV()
+ q := QueryOptions{Namespace: l.opts.Namespace}
+
+ pair, _, err := kv.Get(l.opts.Key, &q)
+ if err != nil {
+ return fmt.Errorf("failed to read lock: %v", err)
+ }
+
+ // Nothing to do if the lock does not exist
+ if pair == nil {
+ return nil
+ }
+
+ // Check for possible flag conflict
+ if pair.Flags != LockFlagValue {
+ return ErrLockConflict
+ }
+
+ // Check if it is in use
+ if pair.Session != "" {
+ return ErrLockInUse
+ }
+
+ // Attempt the delete
+ w := WriteOptions{Namespace: l.opts.Namespace}
+ didRemove, _, err := kv.DeleteCAS(pair, &w)
+ if err != nil {
+ return fmt.Errorf("failed to remove lock: %v", err)
+ }
+ if !didRemove {
+ return ErrLockInUse
+ }
+ return nil
+}
+
+// createSession is used to create a new managed session
+func (l *Lock) createSession() (string, error) {
+ session := l.c.Session()
+ se := l.opts.SessionOpts
+ if se == nil {
+ se = &SessionEntry{
+ Name: l.opts.SessionName,
+ TTL: l.opts.SessionTTL,
+ }
+ }
+ w := WriteOptions{Namespace: l.opts.Namespace}
+ id, _, err := session.Create(se, &w)
+ if err != nil {
+ return "", err
+ }
+ return id, nil
+}
+
+// lockEntry returns a formatted KVPair for the lock
+func (l *Lock) lockEntry(session string) *KVPair {
+ return &KVPair{
+ Key: l.opts.Key,
+ Value: l.opts.Value,
+ Session: session,
+ Flags: LockFlagValue,
+ }
+}
+
+// monitorLock is a long-running routine to monitor lock ownership.
+// It closes the stopCh if we lose our leadership.
+func (l *Lock) monitorLock(session string, stopCh chan struct{}) {
+ defer close(stopCh)
+ kv := l.c.KV()
+ opts := QueryOptions{
+ RequireConsistent: true,
+ Namespace: l.opts.Namespace,
+ }
+WAIT:
+ retries := l.opts.MonitorRetries
+RETRY:
+ pair, meta, err := kv.Get(l.opts.Key, &opts)
+ if err != nil {
+ // If configured we can try to ride out a brief Consul unavailability
+ // by doing retries. Note that we have to attempt the retry in a non-
+ // blocking fashion so that we have a clean place to reset the retry
+ // counter if service is restored.
+ if retries > 0 && IsRetryableError(err) {
+ time.Sleep(l.opts.MonitorRetryTime)
+ retries--
+ opts.WaitIndex = 0
+ goto RETRY
+ }
+ return
+ }
+ if pair != nil && pair.Session == session {
+ opts.WaitIndex = meta.LastIndex
+ goto WAIT
+ }
+}
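+
+A sketch of the leader-election flow that Lock implements; the key name is
+illustrative. Lock blocks until the lock is acquired (or stopCh fires), and
+the returned channel is closed if leadership is later lost:
+
+```go
+lock, err := client.LockKey("service/my-app/leader")
+if err != nil {
+ log.Fatal(err)
+}
+
+stopCh := make(chan struct{})
+lostCh, err := lock.Lock(stopCh)
+if err != nil {
+ log.Fatal(err)
+}
+if lostCh == nil {
+ log.Fatal("lock attempt was aborted via stopCh")
+}
+
+// Do leader-only work here, watching lostCh the whole time.
+<-lostCh
+log.Println("leadership lost; stepping down")
+_ = lock.Unlock()
+```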
diff --git a/vendor/github.com/hashicorp/consul/api/namespace.go b/vendor/github.com/hashicorp/consul/api/namespace.go
new file mode 100644
index 0000000..875af10
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/namespace.go
@@ -0,0 +1,159 @@
+package api
+
+import (
+ "fmt"
+ "time"
+)
+
+// Namespace is the configuration of a single namespace. Namespacing is a Consul Enterprise feature.
+type Namespace struct {
+ // Name is the name of the Namespace. It must be unique and
+ // must be a DNS hostname. There are also other reserved names
+ // that may not be used.
+ Name string `json:"Name"`
+
+ // Description is where the user puts any information they want
+ // about the namespace. It is not used internally.
+ Description string `json:"Description,omitempty"`
+
+ // ACLs is the configuration of ACLs for this namespace. It has its
+ // own struct so that we can add more to it in the future.
+ // This is nullable so that we can omit if empty when encoding in JSON
+ ACLs *NamespaceACLConfig `json:"ACLs,omitempty"`
+
+ // Meta is a map that can be used to add kv metadata to the namespace definition
+ Meta map[string]string `json:"Meta,omitempty"`
+
+ // DeletedAt is the time when the Namespace was marked for deletion
+ // This is nullable so that we can omit if empty when encoding in JSON
+ DeletedAt *time.Time `json:"DeletedAt,omitempty"`
+
+ // CreateIndex is the Raft index at which the Namespace was created
+ CreateIndex uint64 `json:"CreateIndex,omitempty"`
+
+ // ModifyIndex is the latest Raft index at which the Namespace was modified.
+ ModifyIndex uint64 `json:"ModifyIndex,omitempty"`
+}
+
+// NamespaceACLConfig is the Namespace specific ACL configuration container
+type NamespaceACLConfig struct {
+ // PolicyDefaults is the list of policies that should be used for the parent authorizer
+ // of all tokens in the associated namespace.
+ PolicyDefaults []ACLLink `json:"PolicyDefaults"`
+ // RoleDefaults is the list of roles that should be used for the parent authorizer
+ // of all tokens in the associated namespace.
+ RoleDefaults []ACLLink `json:"RoleDefaults"`
+}
+
+// Namespaces can be used to manage Namespaces in Consul Enterprise.
+type Namespaces struct {
+ c *Client
+}
+
+// Namespaces returns a handle to the namespace endpoints.
+func (c *Client) Namespaces() *Namespaces {
+ return &Namespaces{c}
+}
+
+func (n *Namespaces) Create(ns *Namespace, q *WriteOptions) (*Namespace, *WriteMeta, error) {
+ if ns.Name == "" {
+ return nil, nil, fmt.Errorf("Must specify a Name for Namespace creation")
+ }
+
+ r := n.c.newRequest("PUT", "/v1/namespace")
+ r.setWriteOptions(q)
+ r.obj = ns
+ rtt, resp, err := requireOK(n.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ var out Namespace
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+
+ return &out, wm, nil
+}
+
+func (n *Namespaces) Update(ns *Namespace, q *WriteOptions) (*Namespace, *WriteMeta, error) {
+ if ns.Name == "" {
+ return nil, nil, fmt.Errorf("Must specify a Name for Namespace updating")
+ }
+
+ r := n.c.newRequest("PUT", "/v1/namespace/"+ns.Name)
+ r.setWriteOptions(q)
+ r.obj = ns
+ rtt, resp, err := requireOK(n.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ var out Namespace
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+
+ return &out, wm, nil
+}
+
+func (n *Namespaces) Read(name string, q *QueryOptions) (*Namespace, *QueryMeta, error) {
+ var out Namespace
+ r := n.c.newRequest("GET", "/v1/namespace/"+name)
+ r.setQueryOptions(q)
+ found, rtt, resp, err := requireNotFoundOrOK(n.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ if !found {
+ return nil, qm, nil
+ }
+
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return &out, qm, nil
+}
+
+func (n *Namespaces) Delete(name string, q *WriteOptions) (*WriteMeta, error) {
+ r := n.c.newRequest("DELETE", "/v1/namespace/"+name)
+ r.setWriteOptions(q)
+ rtt, resp, err := requireOK(n.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+ return wm, nil
+}
+
+func (n *Namespaces) List(q *QueryOptions) ([]*Namespace, *QueryMeta, error) {
+ var out []*Namespace
+ r := n.c.newRequest("GET", "/v1/namespaces")
+ r.setQueryOptions(q)
+ rtt, resp, err := requireOK(n.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
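+
+Namespaces are a Consul Enterprise feature, so the following sketch only
+works against an Enterprise cluster; the namespace name is made up:
+
+```go
+ns, _, err := client.Namespaces().Create(&api.Namespace{
+ Name: "team-a",
+ Description: "resources owned by team A",
+}, nil)
+if err != nil {
+ log.Fatal(err) // returns an error on Consul OSS
+}
+fmt.Println("created namespace", ns.Name)
+```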
diff --git a/vendor/github.com/hashicorp/consul/api/operator.go b/vendor/github.com/hashicorp/consul/api/operator.go
new file mode 100644
index 0000000..079e224
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator.go
@@ -0,0 +1,11 @@
+package api
+
+// Operator can be used to perform low-level operator tasks for Consul.
+type Operator struct {
+ c *Client
+}
+
+// Operator returns a handle to the operator endpoints.
+func (c *Client) Operator() *Operator {
+ return &Operator{c}
+}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_area.go b/vendor/github.com/hashicorp/consul/api/operator_area.go
new file mode 100644
index 0000000..5cf7e49
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_area.go
@@ -0,0 +1,194 @@
+package api
+
+// The /v1/operator/area endpoints are available only in Consul Enterprise and
+// interact with its network area subsystem. Network areas are used to link
+// together Consul servers in different Consul datacenters. With network areas,
+// Consul datacenters can be linked together in ways other than a fully-connected
+// mesh, as is required for Consul's WAN.
+
+import (
+ "net"
+ "time"
+)
+
+// Area defines a network area.
+type Area struct {
+ // ID is the identifier for an area (a UUID). This must be left empty
+ // when creating a new area.
+ ID string
+
+ // PeerDatacenter is the peer Consul datacenter that will make up the
+ // other side of this network area. Network areas always involve a pair
+ // of datacenters: the datacenter where the area was created, and the
+ // peer datacenter. This is required.
+ PeerDatacenter string
+
+ // RetryJoin specifies the addresses of Consul servers to join, such as
+ // IPs or hostnames with an optional port number. This is optional.
+ RetryJoin []string
+
+ // UseTLS specifies whether gossip over this area should be encrypted with TLS
+ // if possible.
+ UseTLS bool
+}
+
+// AreaJoinResponse is returned when a join occurs and gives the result for each
+// address.
+type AreaJoinResponse struct {
+ // The address that was joined.
+ Address string
+
+ // Whether or not the join was a success.
+ Joined bool
+
+ // If we couldn't join, this is the message with information.
+ Error string
+}
+
+// SerfMember is a generic structure for reporting information about members in
+// a Serf cluster. This is only used by the area endpoints right now, but this
+// could be expanded to other endpoints in the future.
+type SerfMember struct {
+ // ID is the node identifier (a UUID).
+ ID string
+
+ // Name is the node name.
+ Name string
+
+ // Addr has the IP address.
+ Addr net.IP
+
+ // Port is the RPC port.
+ Port uint16
+
+ // Datacenter is the DC name.
+ Datacenter string
+
+ // Role is "client", "server", or "unknown".
+ Role string
+
+ // Build has the version of the Consul agent.
+ Build string
+
+ // Protocol is the protocol of the Consul agent.
+ Protocol int
+
+ // Status is the Serf health status "none", "alive", "leaving", "left",
+ // or "failed".
+ Status string
+
+ // RTT is the estimated round trip time from the server handling the
+ // request to this member. This will be negative if no RTT estimate
+ // is available.
+ RTT time.Duration
+}
+
+// AreaCreate will create a new network area. The ID in the given structure must
+// be empty and a generated ID will be returned on success.
+func (op *Operator) AreaCreate(area *Area, q *WriteOptions) (string, *WriteMeta, error) {
+ r := op.c.newRequest("POST", "/v1/operator/area")
+ r.setWriteOptions(q)
+ r.obj = area
+ rtt, resp, err := requireOK(op.c.doRequest(r))
+ if err != nil {
+ return "", nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{}
+ wm.RequestTime = rtt
+
+ var out struct{ ID string }
+ if err := decodeBody(resp, &out); err != nil {
+ return "", nil, err
+ }
+ return out.ID, wm, nil
+}
+
+// AreaUpdate will update the configuration of the network area with the given ID.
+func (op *Operator) AreaUpdate(areaID string, area *Area, q *WriteOptions) (string, *WriteMeta, error) {
+ r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID)
+ r.setWriteOptions(q)
+ r.obj = area
+ rtt, resp, err := requireOK(op.c.doRequest(r))
+ if err != nil {
+ return "", nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{}
+ wm.RequestTime = rtt
+
+ var out struct{ ID string }
+ if err := decodeBody(resp, &out); err != nil {
+ return "", nil, err
+ }
+ return out.ID, wm, nil
+}
+
+// AreaGet returns a single network area.
+func (op *Operator) AreaGet(areaID string, q *QueryOptions) ([]*Area, *QueryMeta, error) {
+ var out []*Area
+ qm, err := op.c.query("/v1/operator/area/"+areaID, &out, q)
+ if err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
+
+// AreaList returns all the available network areas.
+func (op *Operator) AreaList(q *QueryOptions) ([]*Area, *QueryMeta, error) {
+ var out []*Area
+ qm, err := op.c.query("/v1/operator/area", &out, q)
+ if err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
+
+// AreaDelete deletes the given network area.
+func (op *Operator) AreaDelete(areaID string, q *WriteOptions) (*WriteMeta, error) {
+ r := op.c.newRequest("DELETE", "/v1/operator/area/"+areaID)
+ r.setWriteOptions(q)
+ rtt, resp, err := requireOK(op.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{}
+ wm.RequestTime = rtt
+ return wm, nil
+}
+
+// AreaJoin attempts to join the given set of join addresses to the given
+// network area. See the Area structure for details about join addresses.
+func (op *Operator) AreaJoin(areaID string, addresses []string, q *WriteOptions) ([]*AreaJoinResponse, *WriteMeta, error) {
+ r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID+"/join")
+ r.setWriteOptions(q)
+ r.obj = addresses
+ rtt, resp, err := requireOK(op.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{}
+ wm.RequestTime = rtt
+
+ var out []*AreaJoinResponse
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, nil, err
+ }
+ return out, wm, nil
+}
+
+// AreaMembers lists the Serf information about the members in the given area.
+func (op *Operator) AreaMembers(areaID string, q *QueryOptions) ([]*SerfMember, *QueryMeta, error) {
+ var out []*SerfMember
+ qm, err := op.c.query("/v1/operator/area/"+areaID+"/members", &out, q)
+ if err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
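+
+As the package comment explains, an area pairs the local datacenter with one
+peer. A hedged sketch (Enterprise-only; the datacenter name and server
+address are placeholders):
+
+```go
+op := client.Operator()
+
+// Create an area linking the local datacenter to "dc2", then join a server.
+areaID, _, err := op.AreaCreate(&api.Area{
+ PeerDatacenter: "dc2",
+ RetryJoin: []string{"10.0.0.5"},
+}, nil)
+if err != nil {
+ log.Fatal(err)
+}
+
+results, _, err := op.AreaJoin(areaID, []string{"10.0.0.5"}, nil)
+if err != nil {
+ log.Fatal(err)
+}
+for _, res := range results {
+ fmt.Println(res.Address, res.Joined, res.Error)
+}
+```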
diff --git a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go
new file mode 100644
index 0000000..0e4ef24
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go
@@ -0,0 +1,232 @@
+package api
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// AutopilotConfiguration is used for querying/setting the Autopilot configuration.
+// Autopilot helps manage operator tasks related to Consul servers like removing
+// failed servers from the Raft quorum.
+type AutopilotConfiguration struct {
+ // CleanupDeadServers controls whether to remove dead servers from the Raft
+ // peer list when a new server joins
+ CleanupDeadServers bool
+
+ // LastContactThreshold is the limit on the amount of time a server can go
+ // without leader contact before being considered unhealthy.
+ LastContactThreshold *ReadableDuration
+
+ // MaxTrailingLogs is the number of entries in the Raft log that a server can
+ // be behind before being considered unhealthy.
+ MaxTrailingLogs uint64
+
+ // MinQuorum sets the minimum number of servers allowed in a cluster before
+ // autopilot can prune dead servers.
+ MinQuorum uint
+
+ // ServerStabilizationTime is the minimum amount of time a server must be
+ // in a stable, healthy state before it can be added to the cluster. Only
+ // applicable with Raft protocol version 3 or higher.
+ ServerStabilizationTime *ReadableDuration
+
+ // (Enterprise-only) RedundancyZoneTag is the node tag to use for separating
+ // servers into zones for redundancy. If left blank, this feature will be disabled.
+ RedundancyZoneTag string
+
+ // (Enterprise-only) DisableUpgradeMigration will disable Autopilot's upgrade migration
+ // strategy of waiting until enough newer-versioned servers have been added to the
+ // cluster before promoting them to voters.
+ DisableUpgradeMigration bool
+
+ // (Enterprise-only) UpgradeVersionTag is the node tag to use for version info when
+ // performing upgrade migrations. If left blank, the Consul version will be used.
+ UpgradeVersionTag string
+
+ // CreateIndex holds the index corresponding to the creation of this configuration.
+ // This is a read-only field.
+ CreateIndex uint64
+
+ // ModifyIndex will be set to the index of the last update when retrieving the
+ // Autopilot configuration. Resubmitting a configuration with
+ // AutopilotCASConfiguration will perform a check-and-set operation which ensures
+ // there hasn't been a subsequent update since the configuration was retrieved.
+ ModifyIndex uint64
+}
+
+// ServerHealth is the health (from the leader's point of view) of a server.
+type ServerHealth struct {
+ // ID is the raft ID of the server.
+ ID string
+
+ // Name is the node name of the server.
+ Name string
+
+ // Address is the address of the server.
+ Address string
+
+ // The status of the SerfHealth check for the server.
+ SerfStatus string
+
+ // Version is the Consul version of the server.
+ Version string
+
+ // Leader is whether this server is currently the leader.
+ Leader bool
+
+ // LastContact is the time since this node's last contact with the leader.
+ LastContact *ReadableDuration
+
+ // LastTerm is the highest leader term this server has a record of in its Raft log.
+ LastTerm uint64
+
+ // LastIndex is the last log index this server has a record of in its Raft log.
+ LastIndex uint64
+
+ // Healthy is whether or not the server is healthy according to the current
+ // Autopilot config.
+ Healthy bool
+
+ // Voter is whether this is a voting server.
+ Voter bool
+
+ // StableSince is the last time this server's Healthy value changed.
+ StableSince time.Time
+}
+
+// OperatorHealthReply is a representation of the overall health of the cluster
+type OperatorHealthReply struct {
+ // Healthy is true if all the servers in the cluster are healthy.
+ Healthy bool
+
+ // FailureTolerance is the number of healthy servers that could be lost without
+ // an outage occurring.
+ FailureTolerance int
+
+ // Servers holds the health of each server.
+ Servers []ServerHealth
+}
+
+// ReadableDuration is a duration type that is serialized to JSON in human-readable format.
+type ReadableDuration time.Duration
+
+func NewReadableDuration(dur time.Duration) *ReadableDuration {
+ d := ReadableDuration(dur)
+ return &d
+}
+
+func (d *ReadableDuration) String() string {
+ return d.Duration().String()
+}
+
+func (d *ReadableDuration) Duration() time.Duration {
+ if d == nil {
+ return time.Duration(0)
+ }
+ return time.Duration(*d)
+}
+
+func (d *ReadableDuration) MarshalJSON() ([]byte, error) {
+ return []byte(fmt.Sprintf(`"%s"`, d.Duration().String())), nil
+}
+
+func (d *ReadableDuration) UnmarshalJSON(raw []byte) (err error) {
+ if d == nil {
+ return fmt.Errorf("cannot unmarshal to nil pointer")
+ }
+
+ var dur time.Duration
+ str := string(raw)
+ if len(str) >= 2 && str[0] == '"' && str[len(str)-1] == '"' {
+ // quoted string
+ dur, err = time.ParseDuration(str[1 : len(str)-1])
+ if err != nil {
+ return err
+ }
+ } else {
+ // no quotes, not a string
+ v, err := strconv.ParseFloat(str, 64)
+ if err != nil {
+ return err
+ }
+ dur = time.Duration(v)
+ }
+
+ *d = ReadableDuration(dur)
+ return nil
+}
+
+// AutopilotGetConfiguration is used to query the current Autopilot configuration.
+func (op *Operator) AutopilotGetConfiguration(q *QueryOptions) (*AutopilotConfiguration, error) {
+ r := op.c.newRequest("GET", "/v1/operator/autopilot/configuration")
+ r.setQueryOptions(q)
+ _, resp, err := requireOK(op.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var out AutopilotConfiguration
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+
+ return &out, nil
+}
+
+// AutopilotSetConfiguration is used to set the current Autopilot configuration.
+func (op *Operator) AutopilotSetConfiguration(conf *AutopilotConfiguration, q *WriteOptions) error {
+ r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration")
+ r.setWriteOptions(q)
+ r.obj = conf
+ _, resp, err := requireOK(op.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// AutopilotCASConfiguration is used to perform a Check-And-Set update on the
+// Autopilot configuration. The ModifyIndex value will be respected. Returns
+// true on success or false on failure.
+func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *WriteOptions) (bool, error) {
+ r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration")
+ r.setWriteOptions(q)
+ r.params.Set("cas", strconv.FormatUint(conf.ModifyIndex, 10))
+ r.obj = conf
+ _, resp, err := requireOK(op.c.doRequest(r))
+ if err != nil {
+ return false, err
+ }
+ defer resp.Body.Close()
+
+ var buf bytes.Buffer
+ if _, err := io.Copy(&buf, resp.Body); err != nil {
+ return false, fmt.Errorf("Failed to read response: %v", err)
+ }
+ res := strings.Contains(buf.String(), "true")
+
+ return res, nil
+}
+
+// AutopilotServerHealth is used to query the health of the servers in the cluster.
+func (op *Operator) AutopilotServerHealth(q *QueryOptions) (*OperatorHealthReply, error) {
+ r := op.c.newRequest("GET", "/v1/operator/autopilot/health")
+ r.setQueryOptions(q)
+ _, resp, err := requireOK(op.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var out OperatorHealthReply
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
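+
+The read-modify-CAS pattern described in the ModifyIndex comment looks
+roughly like this:
+
+```go
+op := client.Operator()
+
+conf, err := op.AutopilotGetConfiguration(nil)
+if err != nil {
+ log.Fatal(err)
+}
+conf.CleanupDeadServers = true
+
+// The CAS uses conf.ModifyIndex from the read above; ok is false when a
+// concurrent update bumped the index in the meantime.
+ok, err := op.AutopilotCASConfiguration(conf, nil)
+if err != nil {
+ log.Fatal(err)
+}
+if !ok {
+ log.Println("configuration changed concurrently; retry the read-modify-write")
+}
+```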
diff --git a/vendor/github.com/hashicorp/consul/api/operator_keyring.go b/vendor/github.com/hashicorp/consul/api/operator_keyring.go
new file mode 100644
index 0000000..5b80f9f
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_keyring.go
@@ -0,0 +1,92 @@
+package api
+
+// keyringRequest is used for performing Keyring operations
+type keyringRequest struct {
+ Key string
+}
+
+// KeyringResponse is returned when listing the gossip encryption keys
+type KeyringResponse struct {
+ // Whether this response is for a WAN ring
+ WAN bool
+
+ // The datacenter name this request corresponds to
+ Datacenter string
+
+ // Segment has the network segment this request corresponds to.
+ Segment string
+
+ // Messages has information or errors from serf
+ Messages map[string]string `json:",omitempty"`
+
+ // A map of the encryption keys to the number of nodes they're installed on
+ Keys map[string]int
+
+ // A map of the encryption primary keys to the number of nodes they're installed on
+ PrimaryKeys map[string]int
+
+ // The total number of nodes in this ring
+ NumNodes int
+}
+
+// KeyringInstall is used to install a new gossip encryption key into the cluster
+func (op *Operator) KeyringInstall(key string, q *WriteOptions) error {
+ r := op.c.newRequest("POST", "/v1/operator/keyring")
+ r.setWriteOptions(q)
+ r.obj = keyringRequest{
+ Key: key,
+ }
+ _, resp, err := requireOK(op.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// KeyringList is used to list the gossip keys installed in the cluster
+func (op *Operator) KeyringList(q *QueryOptions) ([]*KeyringResponse, error) {
+ r := op.c.newRequest("GET", "/v1/operator/keyring")
+ r.setQueryOptions(q)
+ _, resp, err := requireOK(op.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var out []*KeyringResponse
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// KeyringRemove is used to remove a gossip encryption key from the cluster
+func (op *Operator) KeyringRemove(key string, q *WriteOptions) error {
+ r := op.c.newRequest("DELETE", "/v1/operator/keyring")
+ r.setWriteOptions(q)
+ r.obj = keyringRequest{
+ Key: key,
+ }
+ _, resp, err := requireOK(op.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// KeyringUse is used to change the active gossip encryption key
+func (op *Operator) KeyringUse(key string, q *WriteOptions) error {
+ r := op.c.newRequest("PUT", "/v1/operator/keyring")
+ r.setWriteOptions(q)
+ r.obj = keyringRequest{
+ Key: key,
+ }
+ _, resp, err := requireOK(op.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ resp.Body.Close()
+ return nil
+}
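
The four keyring calls above compose into the usual rotation sequence: install the new key everywhere, make it primary, remove the old one, then verify. A hedged sketch follows; the key strings are placeholders standing in for real `consul keygen` output, and a gossip-encrypted cluster is assumed.

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	op := client.Operator()

	// Placeholder base64 keys; real keys come from `consul keygen`.
	oldKey := "4geKDMKbpIi5MNN9vo3aNJbB1EaqBtPYIf1nXy6nCVE="
	newKey := "qDOPBEr+/oUVeOFQOnVypxwDaHzLrD+lvjo5vCEBbZ0="

	if err := op.KeyringInstall(newKey, nil); err != nil { // distribute
		log.Fatal(err)
	}
	if err := op.KeyringUse(newKey, nil); err != nil { // make primary
		log.Fatal(err)
	}
	if err := op.KeyringRemove(oldKey, nil); err != nil { // retire old key
		log.Fatal(err)
	}

	// Confirm: list keys per datacenter/segment.
	rings, err := op.KeyringList(nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, ring := range rings {
		log.Printf("dc=%s wan=%v keys=%v", ring.Datacenter, ring.WAN, ring.Keys)
	}
}
```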
diff --git a/vendor/github.com/hashicorp/consul/api/operator_license.go b/vendor/github.com/hashicorp/consul/api/operator_license.go
new file mode 100644
index 0000000..51b64ce
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_license.go
@@ -0,0 +1,114 @@
+package api
+
+import (
+ "io/ioutil"
+ "strings"
+ "time"
+)
+
+type License struct {
+ // The unique identifier of the license
+ LicenseID string `json:"license_id"`
+
+ // The customer ID associated with the license
+ CustomerID string `json:"customer_id"`
+
+ // If set, an identifier that should be used to lock the license to a
+ // particular site, cluster, etc.
+ InstallationID string `json:"installation_id"`
+
+ // The time at which the license was issued
+ IssueTime time.Time `json:"issue_time"`
+
+ // The time at which the license starts being valid
+ StartTime time.Time `json:"start_time"`
+
+ // The time after which the license expires
+ ExpirationTime time.Time `json:"expiration_time"`
+
+ // The time at which the license ceases to function and can
+ // no longer be used in any capacity
+ TerminationTime time.Time `json:"termination_time"`
+
+ // The product the license is valid for
+ Product string `json:"product"`
+
+ // License Specific Flags
+ Flags map[string]interface{} `json:"flags"`
+
+ // Modules is a list of the licensed enterprise modules
+ Modules []string `json:"modules"`
+
+ // List of features enabled by the license
+ Features []string `json:"features"`
+}
+
+type LicenseReply struct {
+ Valid bool
+ License *License
+ Warnings []string
+}
+
+// LicenseGet returns the current license and whether it is valid.
+func (op *Operator) LicenseGet(q *QueryOptions) (*LicenseReply, error) {
+ var reply LicenseReply
+ if _, err := op.c.query("/v1/operator/license", &reply, q); err != nil {
+ return nil, err
+ }
+ return &reply, nil
+}
+
+// LicenseGetSigned returns the raw, signed license blob.
+func (op *Operator) LicenseGetSigned(q *QueryOptions) (string, error) {
+ r := op.c.newRequest("GET", "/v1/operator/license")
+ r.params.Set("signed", "1")
+ r.setQueryOptions(q)
+ _, resp, err := requireOK(op.c.doRequest(r))
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ data, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return "", err
+ }
+
+ return string(data), nil
+}
+
+// LicenseReset will reset the license to the builtin one if it is still valid.
+// If the builtin license is invalid, the current license stays active.
+func (op *Operator) LicenseReset(opts *WriteOptions) (*LicenseReply, error) {
+ var reply LicenseReply
+ r := op.c.newRequest("DELETE", "/v1/operator/license")
+ r.setWriteOptions(opts)
+ _, resp, err := requireOK(op.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ if err := decodeBody(resp, &reply); err != nil {
+ return nil, err
+ }
+
+ return &reply, nil
+}
+
+// LicensePut applies the given signed license to the cluster.
+func (op *Operator) LicensePut(license string, opts *WriteOptions) (*LicenseReply, error) {
+ var reply LicenseReply
+ r := op.c.newRequest("PUT", "/v1/operator/license")
+ r.setWriteOptions(opts)
+ r.body = strings.NewReader(license)
+ _, resp, err := requireOK(op.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ if err := decodeBody(resp, &reply); err != nil {
+ return nil, err
+ }
+
+ return &reply, nil
+}
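
A small sketch of reading the active license. The `/v1/operator/license` endpoint is Consul Enterprise only, so OSS agents are expected to return an error here; the agent address and options are assumptions.

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Query the active license (Enterprise only).
	reply, err := client.Operator().LicenseGet(nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("valid=%v warnings=%v", reply.Valid, reply.Warnings)
	if reply.License != nil {
		log.Printf("product=%s expires=%s",
			reply.License.Product, reply.License.ExpirationTime)
	}
}
```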
diff --git a/vendor/github.com/hashicorp/consul/api/operator_raft.go b/vendor/github.com/hashicorp/consul/api/operator_raft.go
new file mode 100644
index 0000000..c6d7165
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_raft.go
@@ -0,0 +1,89 @@
+package api
+
+// RaftServer has information about a server in the Raft configuration.
+type RaftServer struct {
+ // ID is the unique ID for the server. These are currently the same
+ // as the address, but they will be changed to a real GUID in a future
+ // release of Consul.
+ ID string
+
+ // Node is the node name of the server, as known by Consul, or this
+ // will be set to "(unknown)" otherwise.
+ Node string
+
+ // Address is the IP:port of the server, used for Raft communications.
+ Address string
+
+ // Leader is true if this server is the current cluster leader.
+ Leader bool
+
+ // Protocol version is the raft protocol version used by the server
+ ProtocolVersion string
+
+ // Voter is true if this server has a vote in the cluster. This might
+ // be false if the server is staging and still coming online, or if
+ // it's a non-voting server, which will be added in a future release of
+ // Consul.
+ Voter bool
+}
+
+// RaftConfiguration is returned when querying for the current Raft configuration.
+type RaftConfiguration struct {
+ // Servers has the list of servers in the Raft configuration.
+ Servers []*RaftServer
+
+ // Index has the Raft index of this configuration.
+ Index uint64
+}
+
+// RaftGetConfiguration is used to query the current Raft peer set.
+func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) {
+ r := op.c.newRequest("GET", "/v1/operator/raft/configuration")
+ r.setQueryOptions(q)
+ _, resp, err := requireOK(op.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var out RaftConfiguration
+ if err := decodeBody(resp, &out); err != nil {
+ return nil, err
+ }
+ return &out, nil
+}
+
+// RaftRemovePeerByAddress is used to kick a stale peer (one that is in the Raft
+// quorum but no longer known to Serf or the catalog) by address in the form of
+// "IP:port".
+func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error {
+ r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
+ r.setWriteOptions(q)
+
+ r.params.Set("address", address)
+
+ _, resp, err := requireOK(op.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+
+ resp.Body.Close()
+ return nil
+}
+
+// RaftRemovePeerByID is used to kick a stale peer (one that is in the Raft
+// quorum but no longer known to Serf or the catalog) by ID.
+func (op *Operator) RaftRemovePeerByID(id string, q *WriteOptions) error {
+ r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
+ r.setWriteOptions(q)
+
+ r.params.Set("id", id)
+
+ _, resp, err := requireOK(op.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+
+ resp.Body.Close()
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_segment.go b/vendor/github.com/hashicorp/consul/api/operator_segment.go
new file mode 100644
index 0000000..92b05d3
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_segment.go
@@ -0,0 +1,11 @@
+package api
+
+// SegmentList returns all the available LAN segments.
+func (op *Operator) SegmentList(q *QueryOptions) ([]string, *QueryMeta, error) {
+ var out []string
+ qm, err := op.c.query("/v1/operator/segment", &out, q)
+ if err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
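
SegmentList is a one-liner in practice; named segments beyond the default require Consul Enterprise. A minimal sketch assuming a local agent:

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	segments, _, err := client.Operator().SegmentList(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Typically just the default ("") segment outside Enterprise.
	log.Printf("LAN segments: %v", segments)
}
```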
diff --git a/vendor/github.com/hashicorp/consul/api/prepared_query.go b/vendor/github.com/hashicorp/consul/api/prepared_query.go
new file mode 100644
index 0000000..5ac2535
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/prepared_query.go
@@ -0,0 +1,223 @@
+package api
+
+// QueryDatacenterOptions sets options about how we fail over if there are no
+// healthy nodes in the local datacenter.
+type QueryDatacenterOptions struct {
+ // NearestN is set to the number of remote datacenters to try, based on
+ // network coordinates.
+ NearestN int
+
+ // Datacenters is a fixed list of datacenters to try after NearestN. We
+ // never try a datacenter multiple times, so those are subtracted from
+ // this list before proceeding.
+ Datacenters []string
+}
+
+// QueryDNSOptions controls settings when query results are served over DNS.
+type QueryDNSOptions struct {
+ // TTL is the time to live for the served DNS results.
+ TTL string
+}
+
+// ServiceQuery is used to query for a set of healthy nodes offering a specific
+// service.
+type ServiceQuery struct {
+ // Service is the service to query.
+ Service string
+
+ // Namespace of the service to query
+ Namespace string `json:",omitempty"`
+
+ // Near allows baking in the name of a node to automatically distance-
+ // sort from. The magic "_agent" value is supported, which sorts near
+ // the agent which initiated the request by default.
+ Near string
+
+ // Failover controls what we do if there are no healthy nodes in the
+ // local datacenter.
+ Failover QueryDatacenterOptions
+
+ // IgnoreCheckIDs is an optional list of health check IDs to ignore when
+ // considering which nodes are healthy. It is useful as an emergency measure
+ // to temporarily override a health check that is producing false negatives,
+ // for example.
+ IgnoreCheckIDs []string
+
+ // If OnlyPassing is true then we will only include nodes with passing
+ // health checks (critical AND warning checks will cause a node to be
+ // discarded)
+ OnlyPassing bool
+
+ // Tags are a set of required and/or disallowed tags. If a tag is in
+ // this list it must be present. If the tag is preceded with "!" then
+ // it is disallowed.
+ Tags []string
+
+ // NodeMeta is a map of required node metadata fields. If a key/value
+ // pair is in this map it must be present on the node in order for the
+ // service entry to be returned.
+ NodeMeta map[string]string
+
+ // ServiceMeta is a map of required service metadata fields. If a key/value
+ // pair is in this map it must be present on the node in order for the
+ // service entry to be returned.
+ ServiceMeta map[string]string
+
+ // Connect if true will filter the prepared query results to only
+ // include Connect-capable services. These include both native services
+ // and proxies for matching services. Note that if a proxy matches,
+ // the constraints in the query above (Near, OnlyPassing, etc.) apply
+ // to the _proxy_ and not the service being proxied. In practice, proxies
+ // should be directly next to their services so this isn't an issue.
+ Connect bool
+}
+
+// QueryTemplate carries the arguments for creating a templated query.
+type QueryTemplate struct {
+ // Type specifies the type of the query template. Currently only
+ // "name_prefix_match" is supported. This field is required.
+ Type string
+
+ // Regexp allows specifying a regex pattern to match against the name
+ // of the query being executed.
+ Regexp string
+}
+
+// PreparedQueryDefinition defines a complete prepared query.
+type PreparedQueryDefinition struct {
+ // ID is the UUID-based ID for the query, always generated by Consul.
+ ID string
+
+ // Name is an optional friendly name for the query supplied by the
+ // user. NOTE - if this feature is used then it will reduce the security
+ // of any read ACL associated with this query/service since this name
+ // can be used to locate nodes without supplying any ACL.
+ Name string
+
+ // Session is an optional session to tie this query's lifetime to. If
+ // this is omitted then the query will not expire.
+ Session string
+
+ // Token is the ACL token used when the query was created, and it is
+ // used when a query is subsequently executed. This token, or a token
+ // with management privileges, must be used to change the query later.
+ Token string
+
+ // Service defines a service query (leaving things open for other types
+ // later).
+ Service ServiceQuery
+
+ // DNS has options that control how the results of this query are
+ // served over DNS.
+ DNS QueryDNSOptions
+
+ // Template is used to pass through the arguments for creating a
+ // prepared query with an attached template. If a template is given,
+ // interpolations are possible in other struct fields.
+ Template QueryTemplate
+}
+
+// PreparedQueryExecuteResponse has the results of executing a query.
+type PreparedQueryExecuteResponse struct {
+ // Service is the service that was queried.
+ Service string
+
+ // Namespace of the service that was queried
+ Namespace string `json:",omitempty"`
+
+ // Nodes has the nodes that were output by the query.
+ Nodes []ServiceEntry
+
+ // DNS has the options for serving these results over DNS.
+ DNS QueryDNSOptions
+
+ // Datacenter is the datacenter that these results came from.
+ Datacenter string
+
+ // Failovers is a count of how many times we had to query a remote
+ // datacenter.
+ Failovers int
+}
+
+// PreparedQuery can be used to query the prepared query endpoints.
+type PreparedQuery struct {
+ c *Client
+}
+
+// PreparedQuery returns a handle to the prepared query endpoints.
+func (c *Client) PreparedQuery() *PreparedQuery {
+ return &PreparedQuery{c}
+}
+
+// Create makes a new prepared query. The ID of the new query is returned.
+func (c *PreparedQuery) Create(query *PreparedQueryDefinition, q *WriteOptions) (string, *WriteMeta, error) {
+ r := c.c.newRequest("POST", "/v1/query")
+ r.setWriteOptions(q)
+ r.obj = query
+ rtt, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return "", nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{}
+ wm.RequestTime = rtt
+
+ var out struct{ ID string }
+ if err := decodeBody(resp, &out); err != nil {
+ return "", nil, err
+ }
+ return out.ID, wm, nil
+}
+
+// Update makes updates to an existing prepared query.
+func (c *PreparedQuery) Update(query *PreparedQueryDefinition, q *WriteOptions) (*WriteMeta, error) {
+ return c.c.write("/v1/query/"+query.ID, query, nil, q)
+}
+
+// List is used to fetch all the prepared queries (always requires a management
+// token).
+func (c *PreparedQuery) List(q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) {
+ var out []*PreparedQueryDefinition
+ qm, err := c.c.query("/v1/query", &out, q)
+ if err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
+
+// Get is used to fetch a specific prepared query.
+func (c *PreparedQuery) Get(queryID string, q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) {
+ var out []*PreparedQueryDefinition
+ qm, err := c.c.query("/v1/query/"+queryID, &out, q)
+ if err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
+
+// Delete is used to delete a specific prepared query.
+func (c *PreparedQuery) Delete(queryID string, q *WriteOptions) (*WriteMeta, error) {
+ r := c.c.newRequest("DELETE", "/v1/query/"+queryID)
+ r.setWriteOptions(q)
+ rtt, resp, err := requireOK(c.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{}
+ wm.RequestTime = rtt
+ return wm, nil
+}
+
+// Execute is used to execute a specific prepared query. You can execute using
+// a query ID or name.
+func (c *PreparedQuery) Execute(queryIDOrName string, q *QueryOptions) (*PreparedQueryExecuteResponse, *QueryMeta, error) {
+ var out *PreparedQueryExecuteResponse
+ qm, err := c.c.query("/v1/query/"+queryIDOrName+"/execute", &out, q)
+ if err != nil {
+ return nil, nil, err
+ }
+ return out, qm, nil
+}
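
Tying the pieces above together, the sketch below creates a service query with datacenter failover and executes it by ID. The service name `redis` and the query name are illustrative, not from the source; a local agent is assumed.

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	pq := client.PreparedQuery()

	// Healthy "redis" instances, failing over to up to two nearby datacenters.
	def := &api.PreparedQueryDefinition{
		Name: "redis-nearby",
		Service: api.ServiceQuery{
			Service:     "redis",
			OnlyPassing: true,
			Failover:    api.QueryDatacenterOptions{NearestN: 2},
		},
	}
	id, _, err := pq.Create(def, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Execute by ID (executing by name works too).
	resp, _, err := pq.Execute(id, nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d nodes from dc %s (failovers: %d)",
		len(resp.Nodes), resp.Datacenter, resp.Failovers)
}
```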
diff --git a/vendor/github.com/hashicorp/consul/api/raw.go b/vendor/github.com/hashicorp/consul/api/raw.go
new file mode 100644
index 0000000..745a208
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/raw.go
@@ -0,0 +1,24 @@
+package api
+
+// Raw can be used to do raw queries against custom endpoints
+type Raw struct {
+ c *Client
+}
+
+// Raw returns a handle to query endpoints
+func (c *Client) Raw() *Raw {
+ return &Raw{c}
+}
+
+// Query is used to do a GET request against an endpoint
+// and deserialize the response into an interface using
+// standard Consul conventions.
+func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
+ return raw.c.query(endpoint, out, q)
+}
+
+// Write is used to do a PUT request against an endpoint
+// and serialize/deserialize using the standard Consul conventions.
+func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) {
+ return raw.c.write(endpoint, in, out, q)
+}
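
A short sketch of Raw against an endpoint the typed client does not wrap; `/v1/agent/metrics` is one such endpoint, and decoding into a plain map is an assumption for illustration.

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Hit an arbitrary endpoint and let the client decode the JSON reply.
	var out map[string]interface{}
	if _, err := client.Raw().Query("/v1/agent/metrics", &out, nil); err != nil {
		log.Fatal(err)
	}
	log.Printf("got %d top-level metric fields", len(out))
}
```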
diff --git a/vendor/github.com/hashicorp/consul/api/semaphore.go b/vendor/github.com/hashicorp/consul/api/semaphore.go
new file mode 100644
index 0000000..066ce33
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/semaphore.go
@@ -0,0 +1,530 @@
+package api
+
+import (
+ "encoding/json"
+ "fmt"
+ "path"
+ "sync"
+ "time"
+)
+
+const (
+ // DefaultSemaphoreSessionName is the Session Name we assign if none is provided
+ DefaultSemaphoreSessionName = "Consul API Semaphore"
+
+ // DefaultSemaphoreSessionTTL is the default session TTL if no Session is provided
+ // when creating a new Semaphore. This is used because we do not have another
+ // other check to depend upon.
+ DefaultSemaphoreSessionTTL = "15s"
+
+ // DefaultSemaphoreWaitTime is how long we block for at a time to check if semaphore
+ // acquisition is possible. This affects the minimum time it takes to cancel
+ // a Semaphore acquisition.
+ DefaultSemaphoreWaitTime = 15 * time.Second
+
+ // DefaultSemaphoreKey is the key used within the prefix to
+ // use for coordination between all the contenders.
+ DefaultSemaphoreKey = ".lock"
+
+ // SemaphoreFlagValue is a magic flag we set to indicate a key
+ // is being used for a semaphore. It is used to detect a potential
+ // conflict with a lock.
+ SemaphoreFlagValue = 0xe0f69a2baa414de0
+)
+
+var (
+ // ErrSemaphoreHeld is returned if we attempt to double lock
+ ErrSemaphoreHeld = fmt.Errorf("Semaphore already held")
+
+ // ErrSemaphoreNotHeld is returned if we attempt to unlock a semaphore
+ // that we do not hold.
+ ErrSemaphoreNotHeld = fmt.Errorf("Semaphore not held")
+
+ // ErrSemaphoreInUse is returned if we attempt to destroy a semaphore
+ // that is in use.
+ ErrSemaphoreInUse = fmt.Errorf("Semaphore in use")
+
+ // ErrSemaphoreConflict is returned if the flags on a key
+ // used for a semaphore do not match expectation
+ ErrSemaphoreConflict = fmt.Errorf("Existing key does not match semaphore use")
+)
+
+// Semaphore is used to implement a distributed semaphore
+// using the Consul KV primitives.
+type Semaphore struct {
+ c *Client
+ opts *SemaphoreOptions
+
+ isHeld bool
+ sessionRenew chan struct{}
+ lockSession string
+ l sync.Mutex
+}
+
+// SemaphoreOptions is used to parameterize the Semaphore
+type SemaphoreOptions struct {
+ Prefix string // Must be set and have write permissions
+ Limit int // Must be set, and be positive
+ Value []byte // Optional, value to associate with the contender entry
+ Session string // Optional, created if not specified
+ SessionName string // Optional, defaults to DefaultSemaphoreSessionName
+ SessionTTL string // Optional, defaults to DefaultSemaphoreSessionTTL
+ MonitorRetries int // Optional, defaults to 0 which means no retries
+ MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime
+ SemaphoreWaitTime time.Duration // Optional, defaults to DefaultSemaphoreWaitTime
+ SemaphoreTryOnce bool // Optional, defaults to false which means try forever
+ Namespace string `json:",omitempty"` // Optional, defaults to API client config, namespace of ACL token, or "default" namespace
+}
+
+// semaphoreLock is written under the DefaultSemaphoreKey and
+// is used to coordinate between all the contenders.
+type semaphoreLock struct {
+ // Limit is the integer limit of holders. This is used to
+ // verify that all the holders agree on the value.
+ Limit int
+
+ // Holders is a list of all the semaphore holders.
+ // It maps each holder's session ID to true, effectively acting as a set.
+ Holders map[string]bool
+}
+
+// SemaphorePrefix is used to create a Semaphore which will operate
+// at the given KV prefix and use the given limit for the semaphore.
+// The prefix must have write privileges, and the limit must be agreed
+// upon by all contenders.
+func (c *Client) SemaphorePrefix(prefix string, limit int) (*Semaphore, error) {
+ opts := &SemaphoreOptions{
+ Prefix: prefix,
+ Limit: limit,
+ }
+ return c.SemaphoreOpts(opts)
+}
+
+// SemaphoreOpts is used to create a Semaphore with the given options.
+// The prefix must have write privileges, and the limit must be agreed
+// upon by all contenders. If a Session is not provided, one will be created.
+func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) {
+ if opts.Prefix == "" {
+ return nil, fmt.Errorf("missing prefix")
+ }
+ if opts.Limit <= 0 {
+ return nil, fmt.Errorf("semaphore limit must be positive")
+ }
+ if opts.SessionName == "" {
+ opts.SessionName = DefaultSemaphoreSessionName
+ }
+ if opts.SessionTTL == "" {
+ opts.SessionTTL = DefaultSemaphoreSessionTTL
+ } else {
+ if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
+ return nil, fmt.Errorf("invalid SessionTTL: %v", err)
+ }
+ }
+ if opts.MonitorRetryTime == 0 {
+ opts.MonitorRetryTime = DefaultMonitorRetryTime
+ }
+ if opts.SemaphoreWaitTime == 0 {
+ opts.SemaphoreWaitTime = DefaultSemaphoreWaitTime
+ }
+ s := &Semaphore{
+ c: c,
+ opts: opts,
+ }
+ return s, nil
+}
+
+// Acquire attempts to reserve a slot in the semaphore, blocking until a
+// slot is acquired, the attempt is aborted via the stopCh, or an error is encountered.
+// Providing a non-nil stopCh can be used to abort the attempt.
+// On success, a channel is returned that represents our slot.
+// This channel could be closed at any time due to session invalidation,
+// communication errors, operator intervention, etc. It is NOT safe to
+// assume that the slot is held until Release() unless the Session is specifically
+// created without any associated health checks. By default Consul sessions
+// prefer liveness over safety and an application must be able to handle
+// the session being lost.
+func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) {
+ // Hold the lock as we try to acquire
+ s.l.Lock()
+ defer s.l.Unlock()
+
+ // Check if we already hold the semaphore
+ if s.isHeld {
+ return nil, ErrSemaphoreHeld
+ }
+
+ // Check if we need to create a session first
+ s.lockSession = s.opts.Session
+ if s.lockSession == "" {
+ sess, err := s.createSession()
+ if err != nil {
+ return nil, fmt.Errorf("failed to create session: %v", err)
+ }
+
+ s.sessionRenew = make(chan struct{})
+ s.lockSession = sess
+ session := s.c.Session()
+ go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew)
+
+ // If we fail to acquire the lock, cleanup the session
+ defer func() {
+ if !s.isHeld {
+ close(s.sessionRenew)
+ s.sessionRenew = nil
+ }
+ }()
+ }
+
+ // Create the contender entry
+ kv := s.c.KV()
+ wOpts := WriteOptions{Namespace: s.opts.Namespace}
+
+ made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), &wOpts)
+ if err != nil || !made {
+ return nil, fmt.Errorf("failed to make contender entry: %v", err)
+ }
+
+ // Setup the query options
+ qOpts := QueryOptions{
+ WaitTime: s.opts.SemaphoreWaitTime,
+ Namespace: s.opts.Namespace,
+ }
+
+ start := time.Now()
+ attempts := 0
+WAIT:
+ // Check if we should quit
+ select {
+ case <-stopCh:
+ return nil, nil
+ default:
+ }
+
+ // Handle the one-shot mode.
+ if s.opts.SemaphoreTryOnce && attempts > 0 {
+ elapsed := time.Since(start)
+ if elapsed > s.opts.SemaphoreWaitTime {
+ return nil, nil
+ }
+
+ // Query wait time should not exceed the semaphore wait time
+ qOpts.WaitTime = s.opts.SemaphoreWaitTime - elapsed
+ }
+ attempts++
+
+ // Read the prefix
+ pairs, meta, err := kv.List(s.opts.Prefix, &qOpts)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read prefix: %v", err)
+ }
+
+ // Decode the lock
+ lockPair := s.findLock(pairs)
+ if lockPair.Flags != SemaphoreFlagValue {
+ return nil, ErrSemaphoreConflict
+ }
+ lock, err := s.decodeLock(lockPair)
+ if err != nil {
+ return nil, err
+ }
+
+ // Verify we agree with the limit
+ if lock.Limit != s.opts.Limit {
+ return nil, fmt.Errorf("semaphore limit conflict (lock: %d, local: %d)",
+ lock.Limit, s.opts.Limit)
+ }
+
+ // Prune the dead holders
+ s.pruneDeadHolders(lock, pairs)
+
+ // Check if the lock is held
+ if len(lock.Holders) >= lock.Limit {
+ qOpts.WaitIndex = meta.LastIndex
+ goto WAIT
+ }
+
+ // Create a new lock with us as a holder
+ lock.Holders[s.lockSession] = true
+ newLock, err := s.encodeLock(lock, lockPair.ModifyIndex)
+ if err != nil {
+ return nil, err
+ }
+
+ // Attempt the acquisition
+ didSet, _, err := kv.CAS(newLock, &wOpts)
+ if err != nil {
+ return nil, fmt.Errorf("failed to update lock: %v", err)
+ }
+ if !didSet {
+ // Update failed, could have been a race with another contender,
+ // retry the operation
+ goto WAIT
+ }
+
+ // Watch to ensure we maintain ownership of the slot
+ lockCh := make(chan struct{})
+ go s.monitorLock(s.lockSession, lockCh)
+
+ // Set that we own the lock
+ s.isHeld = true
+
+ // Acquired! All done
+ return lockCh, nil
+}
+
+// Release is used to voluntarily give up our semaphore slot. It is
+// an error to call this if the semaphore has not been acquired.
+func (s *Semaphore) Release() error {
+ // Hold the lock as we try to release
+ s.l.Lock()
+ defer s.l.Unlock()
+
+ // Ensure the lock is actually held
+ if !s.isHeld {
+ return ErrSemaphoreNotHeld
+ }
+
+ // Set that we no longer own the lock
+ s.isHeld = false
+
+ // Stop the session renew
+ if s.sessionRenew != nil {
+ defer func() {
+ close(s.sessionRenew)
+ s.sessionRenew = nil
+ }()
+ }
+
+ // Get and clear the lock session
+ lockSession := s.lockSession
+ s.lockSession = ""
+
+ // Remove ourselves as a lock holder
+ kv := s.c.KV()
+ key := path.Join(s.opts.Prefix, DefaultSemaphoreKey)
+
+ wOpts := WriteOptions{Namespace: s.opts.Namespace}
+ qOpts := QueryOptions{Namespace: s.opts.Namespace}
+
+READ:
+ pair, _, err := kv.Get(key, &qOpts)
+ if err != nil {
+ return err
+ }
+ if pair == nil {
+ pair = &KVPair{}
+ }
+ lock, err := s.decodeLock(pair)
+ if err != nil {
+ return err
+ }
+
+ // Create a new lock without us as a holder
+ if _, ok := lock.Holders[lockSession]; ok {
+ delete(lock.Holders, lockSession)
+ newLock, err := s.encodeLock(lock, pair.ModifyIndex)
+ if err != nil {
+ return err
+ }
+
+ // Swap the locks
+ didSet, _, err := kv.CAS(newLock, &wOpts)
+ if err != nil {
+ return fmt.Errorf("failed to update lock: %v", err)
+ }
+ if !didSet {
+ goto READ
+ }
+ }
+
+ // Destroy the contender entry
+ contenderKey := path.Join(s.opts.Prefix, lockSession)
+ if _, err := kv.Delete(contenderKey, &wOpts); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Destroy is used to clean up the semaphore entry. Invoking it is
+// optional; it will fail if the semaphore is still in use.
+func (s *Semaphore) Destroy() error {
+ // Hold the lock as we try to acquire
+ s.l.Lock()
+ defer s.l.Unlock()
+
+ // Check if we already hold the semaphore
+ if s.isHeld {
+ return ErrSemaphoreHeld
+ }
+
+ // List the entries under the semaphore prefix
+ kv := s.c.KV()
+
+ q := QueryOptions{Namespace: s.opts.Namespace}
+ pairs, _, err := kv.List(s.opts.Prefix, &q)
+ if err != nil {
+ return fmt.Errorf("failed to read prefix: %v", err)
+ }
+
+ // Find the lock pair, bail if it doesn't exist
+ lockPair := s.findLock(pairs)
+ if lockPair.ModifyIndex == 0 {
+ return nil
+ }
+ if lockPair.Flags != SemaphoreFlagValue {
+ return ErrSemaphoreConflict
+ }
+
+ // Decode the lock
+ lock, err := s.decodeLock(lockPair)
+ if err != nil {
+ return err
+ }
+
+ // Prune the dead holders
+ s.pruneDeadHolders(lock, pairs)
+
+ // Check if there are any holders
+ if len(lock.Holders) > 0 {
+ return ErrSemaphoreInUse
+ }
+
+ // Attempt the delete
+ w := WriteOptions{Namespace: s.opts.Namespace}
+ didRemove, _, err := kv.DeleteCAS(lockPair, &w)
+ if err != nil {
+ return fmt.Errorf("failed to remove semaphore: %v", err)
+ }
+ if !didRemove {
+ return ErrSemaphoreInUse
+ }
+ return nil
+}
+
+// createSession is used to create a new managed session
+func (s *Semaphore) createSession() (string, error) {
+ session := s.c.Session()
+ se := &SessionEntry{
+ Name: s.opts.SessionName,
+ TTL: s.opts.SessionTTL,
+ Behavior: SessionBehaviorDelete,
+ }
+
+ w := WriteOptions{Namespace: s.opts.Namespace}
+ id, _, err := session.Create(se, &w)
+ if err != nil {
+ return "", err
+ }
+ return id, nil
+}
+
+// contenderEntry returns a formatted KVPair for the contender
+func (s *Semaphore) contenderEntry(session string) *KVPair {
+ return &KVPair{
+ Key: path.Join(s.opts.Prefix, session),
+ Value: s.opts.Value,
+ Session: session,
+ Flags: SemaphoreFlagValue,
+ }
+}
+
+// findLock is used to find the KV Pair which is used for coordination
+func (s *Semaphore) findLock(pairs KVPairs) *KVPair {
+ key := path.Join(s.opts.Prefix, DefaultSemaphoreKey)
+ for _, pair := range pairs {
+ if pair.Key == key {
+ return pair
+ }
+ }
+ return &KVPair{Flags: SemaphoreFlagValue}
+}
+
+// decodeLock is used to decode a semaphoreLock from an
+// entry in Consul
+func (s *Semaphore) decodeLock(pair *KVPair) (*semaphoreLock, error) {
+ // Handle if there is no lock
+ if pair == nil || pair.Value == nil {
+ return &semaphoreLock{
+ Limit: s.opts.Limit,
+ Holders: make(map[string]bool),
+ }, nil
+ }
+
+ l := &semaphoreLock{}
+ if err := json.Unmarshal(pair.Value, l); err != nil {
+ return nil, fmt.Errorf("lock decoding failed: %v", err)
+ }
+ return l, nil
+}
+
+// encodeLock is used to encode a semaphoreLock into a KVPair
+// that can be PUT
+func (s *Semaphore) encodeLock(l *semaphoreLock, oldIndex uint64) (*KVPair, error) {
+ enc, err := json.Marshal(l)
+ if err != nil {
+ return nil, fmt.Errorf("lock encoding failed: %v", err)
+ }
+ pair := &KVPair{
+ Key: path.Join(s.opts.Prefix, DefaultSemaphoreKey),
+ Value: enc,
+ Flags: SemaphoreFlagValue,
+ ModifyIndex: oldIndex,
+ }
+ return pair, nil
+}
+
+// pruneDeadHolders is used to remove all the dead lock holders
+func (s *Semaphore) pruneDeadHolders(lock *semaphoreLock, pairs KVPairs) {
+ // Gather all the live holders
+ alive := make(map[string]struct{}, len(pairs))
+ for _, pair := range pairs {
+ if pair.Session != "" {
+ alive[pair.Session] = struct{}{}
+ }
+ }
+
+ // Remove any holders that are dead
+ for holder := range lock.Holders {
+ if _, ok := alive[holder]; !ok {
+ delete(lock.Holders, holder)
+ }
+ }
+}
+
+// monitorLock is a long-running routine that monitors semaphore ownership.
+// It closes the stopCh if we lose our slot.
+func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) {
+ defer close(stopCh)
+ kv := s.c.KV()
+ opts := QueryOptions{
+ RequireConsistent: true,
+ Namespace: s.opts.Namespace,
+ }
+WAIT:
+ retries := s.opts.MonitorRetries
+RETRY:
+ pairs, meta, err := kv.List(s.opts.Prefix, &opts)
+ if err != nil {
+ // If configured we can try to ride out a brief Consul unavailability
+ // by doing retries. Note that we have to attempt the retry in a non-
+ // blocking fashion so that we have a clean place to reset the retry
+ // counter if service is restored.
+ if retries > 0 && IsRetryableError(err) {
+ time.Sleep(s.opts.MonitorRetryTime)
+ retries--
+ opts.WaitIndex = 0
+ goto RETRY
+ }
+ return
+ }
+ lockPair := s.findLock(pairs)
+ lock, err := s.decodeLock(lockPair)
+ if err != nil {
+ return
+ }
+ s.pruneDeadHolders(lock, pairs)
+ if _, ok := lock.Holders[session]; ok {
+ opts.WaitIndex = meta.LastIndex
+ goto WAIT
+ }
+}
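
A usage sketch of the semaphore: two concurrent holders under an assumed KV prefix, with the returned channel watched for slot loss exactly as the Acquire comment warns. The prefix and timing are illustrative.

```go
package main

import (
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// At most 2 concurrent holders, coordinated under this KV prefix.
	sema, err := client.SemaphorePrefix("service/my-job/sema", 2)
	if err != nil {
		log.Fatal(err)
	}

	lostCh, err := sema.Acquire(nil) // blocks until a slot is free
	if err != nil {
		log.Fatal(err)
	}
	if lostCh == nil {
		log.Fatal("acquisition aborted")
	}
	defer sema.Release()

	select {
	case <-lostCh:
		// Session invalidated: stop relying on the slot immediately.
		log.Println("lost semaphore slot")
	case <-time.After(5 * time.Second):
		log.Println("work finished while holding the slot")
	}
}
```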
diff --git a/vendor/github.com/hashicorp/consul/api/session.go b/vendor/github.com/hashicorp/consul/api/session.go
new file mode 100644
index 0000000..157ad53
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/session.go
@@ -0,0 +1,243 @@
+package api
+
+import (
+ "errors"
+ "fmt"
+ "time"
+)
+
+const (
+ // SessionBehaviorRelease is the default behavior and causes
+ // all associated locks to be released on session invalidation.
+ SessionBehaviorRelease = "release"
+
+ // SessionBehaviorDelete is new in Consul 0.5 and changes the
+ // behavior to delete all associated locks on session invalidation.
+ // It can be used in a way similar to Ephemeral Nodes in ZooKeeper.
+ SessionBehaviorDelete = "delete"
+)
+
+var ErrSessionExpired = errors.New("session expired")
+
+// SessionEntry represents a session in consul
+type SessionEntry struct {
+ CreateIndex uint64
+ ID string
+ Name string
+ Node string
+ LockDelay time.Duration
+ Behavior string
+ TTL string
+ Namespace string `json:",omitempty"`
+
+ // Deprecated for Consul Enterprise in v1.7.0.
+ Checks []string
+
+ // NodeChecks and ServiceChecks are new in Consul 1.7.0.
+ // When associating checks with sessions, namespaces can be specified for service checks.
+ NodeChecks []string
+ ServiceChecks []ServiceCheck
+}
+
+type ServiceCheck struct {
+ ID string
+ Namespace string
+}
+
+// Session can be used to query the Session endpoints
+type Session struct {
+ c *Client
+}
+
+// Session returns a handle to the session endpoints
+func (c *Client) Session() *Session {
+ return &Session{c}
+}
+
+// CreateNoChecks is like Create but is used specifically to create
+// a session with no associated health checks.
+func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
+ body := make(map[string]interface{})
+ body["NodeChecks"] = []string{}
+ if se != nil {
+ if se.Name != "" {
+ body["Name"] = se.Name
+ }
+ if se.Node != "" {
+ body["Node"] = se.Node
+ }
+ if se.LockDelay != 0 {
+ body["LockDelay"] = durToMsec(se.LockDelay)
+ }
+ if se.Behavior != "" {
+ body["Behavior"] = se.Behavior
+ }
+ if se.TTL != "" {
+ body["TTL"] = se.TTL
+ }
+ }
+ return s.create(body, q)
+}
+
+// Create makes a new session. Providing a session entry can
+// customize the session. It can also be nil to use defaults.
+func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
+ var obj interface{}
+ if se != nil {
+ body := make(map[string]interface{})
+ obj = body
+ if se.Name != "" {
+ body["Name"] = se.Name
+ }
+ if se.Node != "" {
+ body["Node"] = se.Node
+ }
+ if se.LockDelay != 0 {
+ body["LockDelay"] = durToMsec(se.LockDelay)
+ }
+ if len(se.Checks) > 0 {
+ body["Checks"] = se.Checks
+ }
+ if len(se.NodeChecks) > 0 {
+ body["NodeChecks"] = se.NodeChecks
+ }
+ if len(se.ServiceChecks) > 0 {
+ body["ServiceChecks"] = se.ServiceChecks
+ }
+ if se.Behavior != "" {
+ body["Behavior"] = se.Behavior
+ }
+ if se.TTL != "" {
+ body["TTL"] = se.TTL
+ }
+ }
+ return s.create(obj, q)
+}
+
+func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) {
+ var out struct{ ID string }
+ wm, err := s.c.write("/v1/session/create", obj, &out, q)
+ if err != nil {
+ return "", nil, err
+ }
+ return out.ID, wm, nil
+}
+
+// Destroy invalidates a given session
+func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
+ wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q)
+ if err != nil {
+ return nil, err
+ }
+ return wm, nil
+}
+
+// Renew renews the TTL on a given session
+func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) {
+ r := s.c.newRequest("PUT", "/v1/session/renew/"+id)
+ r.setWriteOptions(q)
+ rtt, resp, err := s.c.doRequest(r)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ wm := &WriteMeta{RequestTime: rtt}
+
+ if resp.StatusCode == 404 {
+ return nil, wm, nil
+ } else if resp.StatusCode != 200 {
+ return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
+ }
+
+ var entries []*SessionEntry
+ if err := decodeBody(resp, &entries); err != nil {
+ return nil, nil, fmt.Errorf("Failed to read response: %v", err)
+ }
+ if len(entries) > 0 {
+ return entries[0], wm, nil
+ }
+ return nil, wm, nil
+}
+
+// RenewPeriodic is used to periodically invoke Session.Renew on a
+// session until a doneCh is closed. This is meant to be used in a long running
+// goroutine to ensure a session stays valid.
+func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh <-chan struct{}) error {
+ ctx := q.Context()
+
+ ttl, err := time.ParseDuration(initialTTL)
+ if err != nil {
+ return err
+ }
+
+ waitDur := ttl / 2
+ lastRenewTime := time.Now()
+ var lastErr error
+ for {
+ if time.Since(lastRenewTime) > ttl {
+ return lastErr
+ }
+ select {
+ case <-time.After(waitDur):
+ entry, _, err := s.Renew(id, q)
+ if err != nil {
+ waitDur = time.Second
+ lastErr = err
+ continue
+ }
+ if entry == nil {
+ return ErrSessionExpired
+ }
+
+ // Handle the server updating the TTL
+ ttl, _ = time.ParseDuration(entry.TTL)
+ waitDur = ttl / 2
+ lastRenewTime = time.Now()
+
+ case <-doneCh:
+ // Attempt a session destroy
+ s.Destroy(id, q)
+ return nil
+
+ case <-ctx.Done():
+ // Bail immediately since attempting the destroy would
+ // use the canceled context in q, which would just bail.
+ return ctx.Err()
+ }
+ }
+}
+
+// Info looks up a single session
+func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) {
+ var entries []*SessionEntry
+ qm, err := s.c.query("/v1/session/info/"+id, &entries, q)
+ if err != nil {
+ return nil, nil, err
+ }
+ if len(entries) > 0 {
+ return entries[0], qm, nil
+ }
+ return nil, qm, nil
+}
+
+// Node gets sessions for a node
+func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
+ var entries []*SessionEntry
+ qm, err := s.c.query("/v1/session/node/"+node, &entries, q)
+ if err != nil {
+ return nil, nil, err
+ }
+ return entries, qm, nil
+}
+
+// List gets all active sessions
+func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
+ var entries []*SessionEntry
+ qm, err := s.c.query("/v1/session/list", &entries, q)
+ if err != nil {
+ return nil, nil, err
+ }
+ return entries, qm, nil
+}
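
A sketch combining Create and RenewPeriodic: a TTL session kept alive by a background goroutine until `doneCh` closes (RenewPeriodic also destroys the session on that path, per its select arms above). Names and durations are illustrative.

```go
package main

import (
	"log"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	session := client.Session()

	// TTL-based session whose associated locks are deleted on invalidation.
	id, _, err := session.Create(&api.SessionEntry{
		Name:     "example-worker",
		TTL:      "15s",
		Behavior: api.SessionBehaviorDelete,
	}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Keep it alive in the background until doneCh closes.
	doneCh := make(chan struct{})
	go session.RenewPeriodic("15s", id, nil, doneCh)

	time.Sleep(2 * time.Second) // stand-in for real work
	close(doneCh)               // triggers a session destroy
}
```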
diff --git a/vendor/github.com/hashicorp/consul/api/snapshot.go b/vendor/github.com/hashicorp/consul/api/snapshot.go
new file mode 100644
index 0000000..e902377
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/snapshot.go
@@ -0,0 +1,47 @@
+package api
+
+import (
+ "io"
+)
+
+// Snapshot can be used to query the /v1/snapshot endpoint to take snapshots of
+// Consul's internal state and restore snapshots for disaster recovery.
+type Snapshot struct {
+ c *Client
+}
+
+// Snapshot returns a handle that exposes the snapshot endpoints.
+func (c *Client) Snapshot() *Snapshot {
+ return &Snapshot{c}
+}
+
+// Save requests a new snapshot and provides an io.ReadCloser with the snapshot
+// data to save. If this doesn't return an error, then it's the responsibility
+// of the caller to close it. Only a subset of the QueryOptions are supported:
+// Datacenter, AllowStale, and Token.
+func (s *Snapshot) Save(q *QueryOptions) (io.ReadCloser, *QueryMeta, error) {
+ r := s.c.newRequest("GET", "/v1/snapshot")
+ r.setQueryOptions(q)
+
+ rtt, resp, err := requireOK(s.c.doRequest(r))
+ if err != nil {
+ return nil, nil, err
+ }
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+ return resp.Body, qm, nil
+}
+
+// Restore streams in an existing snapshot and attempts to restore it.
+func (s *Snapshot) Restore(q *WriteOptions, in io.Reader) error {
+ r := s.c.newRequest("PUT", "/v1/snapshot")
+ r.body = in
+ r.setWriteOptions(q)
+ _, _, err := requireOK(s.c.doRequest(r))
+ if err != nil {
+ return err
+ }
+ return nil
+}
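
A sketch streaming a snapshot to disk; per the Save comment, the caller owns closing the body. The file name is arbitrary, and the restore path is shown only as a comment.

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	snap := client.Snapshot()

	// Stream a snapshot to disk; we must close the body ourselves.
	rc, _, err := snap.Save(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	f, err := os.Create("backup.snap")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if _, err := io.Copy(f, rc); err != nil {
		log.Fatal(err)
	}

	// Restoring later is the mirror image:
	//   f, _ := os.Open("backup.snap")
	//   err = snap.Restore(nil, f)
}
```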
diff --git a/vendor/github.com/hashicorp/consul/api/status.go b/vendor/github.com/hashicorp/consul/api/status.go
new file mode 100644
index 0000000..74ef61a
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/status.go
@@ -0,0 +1,43 @@
+package api
+
+// Status can be used to query the Status endpoints
+type Status struct {
+ c *Client
+}
+
+// Status returns a handle to the status endpoints
+func (c *Client) Status() *Status {
+ return &Status{c}
+}
+
+// Leader is used to query for a known leader
+func (s *Status) Leader() (string, error) {
+ r := s.c.newRequest("GET", "/v1/status/leader")
+ _, resp, err := requireOK(s.c.doRequest(r))
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ var leader string
+ if err := decodeBody(resp, &leader); err != nil {
+ return "", err
+ }
+ return leader, nil
+}
+
+// Peers is used to query for known raft peers
+func (s *Status) Peers() ([]string, error) {
+ r := s.c.newRequest("GET", "/v1/status/peers")
+ _, resp, err := requireOK(s.c.doRequest(r))
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var peers []string
+ if err := decodeBody(resp, &peers); err != nil {
+ return nil, err
+ }
+ return peers, nil
+}
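
A trivial sketch exercising both status endpoints against an assumed local agent:

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	status := client.Status()

	leader, err := status.Leader()
	if err != nil {
		log.Fatal(err)
	}
	peers, err := status.Peers()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("leader=%s peers=%v", leader, peers)
}
```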
diff --git a/vendor/github.com/hashicorp/consul/api/txn.go b/vendor/github.com/hashicorp/consul/api/txn.go
new file mode 100644
index 0000000..ef06bcb
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/txn.go
@@ -0,0 +1,244 @@
+package api
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+)
+
+// Txn is used to manipulate the Txn API
+type Txn struct {
+ c *Client
+}
+
+// Txn is used to return a handle to the transaction API
+func (c *Client) Txn() *Txn {
+ return &Txn{c}
+}
+
+// TxnOp is the internal format we send to Consul. Currently K/V, node,
+// service, and check operations are supported.
+type TxnOp struct {
+ KV *KVTxnOp
+ Node *NodeTxnOp
+ Service *ServiceTxnOp
+ Check *CheckTxnOp
+}
+
+// TxnOps is a list of transaction operations.
+type TxnOps []*TxnOp
+
+// TxnResult is the internal format we receive from Consul.
+type TxnResult struct {
+ KV *KVPair
+ Node *Node
+ Service *CatalogService
+ Check *HealthCheck
+}
+
+// TxnResults is a list of TxnResult objects.
+type TxnResults []*TxnResult
+
+// TxnError is used to return information about an operation in a transaction.
+type TxnError struct {
+ OpIndex int
+ What string
+}
+
+// TxnErrors is a list of TxnError objects.
+type TxnErrors []*TxnError
+
+// TxnResponse is the internal format we receive from Consul.
+type TxnResponse struct {
+ Results TxnResults
+ Errors TxnErrors
+}
+
+// KVOp constants give possible operations available in a transaction.
+type KVOp string
+
+const (
+ KVSet KVOp = "set"
+ KVDelete KVOp = "delete"
+ KVDeleteCAS KVOp = "delete-cas"
+ KVDeleteTree KVOp = "delete-tree"
+ KVCAS KVOp = "cas"
+ KVLock KVOp = "lock"
+ KVUnlock KVOp = "unlock"
+ KVGet KVOp = "get"
+ KVGetTree KVOp = "get-tree"
+ KVCheckSession KVOp = "check-session"
+ KVCheckIndex KVOp = "check-index"
+ KVCheckNotExists KVOp = "check-not-exists"
+)
+
+// KVTxnOp defines a single operation inside a transaction.
+type KVTxnOp struct {
+ Verb KVOp
+ Key string
+ Value []byte
+ Flags uint64
+ Index uint64
+ Session string
+ Namespace string `json:",omitempty"`
+}
+
+// KVTxnOps defines a set of operations to be performed inside a single
+// transaction.
+type KVTxnOps []*KVTxnOp
+
+// KVTxnResponse has the outcome of a transaction.
+type KVTxnResponse struct {
+ Results []*KVPair
+ Errors TxnErrors
+}
+
+// SessionOp constants give possible operations available in a transaction.
+type SessionOp string
+
+const (
+ SessionDelete SessionOp = "delete"
+)
+
+// SessionTxnOp defines a single operation inside a transaction.
+type SessionTxnOp struct {
+ Verb SessionOp
+ Session Session
+}
+
+// NodeOp constants give possible operations available in a transaction.
+type NodeOp string
+
+const (
+ NodeGet NodeOp = "get"
+ NodeSet NodeOp = "set"
+ NodeCAS NodeOp = "cas"
+ NodeDelete NodeOp = "delete"
+ NodeDeleteCAS NodeOp = "delete-cas"
+)
+
+// NodeTxnOp defines a single operation inside a transaction.
+type NodeTxnOp struct {
+ Verb NodeOp
+ Node Node
+}
+
+// ServiceOp constants give possible operations available in a transaction.
+type ServiceOp string
+
+const (
+ ServiceGet ServiceOp = "get"
+ ServiceSet ServiceOp = "set"
+ ServiceCAS ServiceOp = "cas"
+ ServiceDelete ServiceOp = "delete"
+ ServiceDeleteCAS ServiceOp = "delete-cas"
+)
+
+// ServiceTxnOp defines a single operation inside a transaction.
+type ServiceTxnOp struct {
+ Verb ServiceOp
+ Node string
+ Service AgentService
+}
+
+// CheckOp constants give possible operations available in a transaction.
+type CheckOp string
+
+const (
+ CheckGet CheckOp = "get"
+ CheckSet CheckOp = "set"
+ CheckCAS CheckOp = "cas"
+ CheckDelete CheckOp = "delete"
+ CheckDeleteCAS CheckOp = "delete-cas"
+)
+
+// CheckTxnOp defines a single operation inside a transaction.
+type CheckTxnOp struct {
+ Verb CheckOp
+ Check HealthCheck
+}
+
+// Txn is used to apply multiple Consul operations in a single, atomic transaction.
+//
+// Note that Go will perform the required base64 encoding on the values
+// automatically because the type is a byte slice. Transactions are defined as a
+// list of operations to perform, using the different fields in the TxnOp structure
+// to define operations. If any operation fails, none of the changes are applied
+// to the state store.
+//
+// Even though this is generally a write operation, we take a QueryOptions input
+// and return a QueryMeta output. If the transaction contains only read ops, then
+// Consul will fast-path it to a different endpoint internally which supports
+// consistency controls, but not blocking. If there are write operations then
+// the request will always be routed through raft and any consistency settings
+// will be ignored.
+//
+// Here's an example:
+//
+// ops := TxnOps{
+// &TxnOp{
+// KV: &KVTxnOp{
+// Verb: KVLock,
+// Key: "test/lock",
+// Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
+// Value: []byte("hello"),
+// },
+// },
+// &TxnOp{
+// KV: &KVTxnOp{
+// Verb: KVGet,
+// Key: "another/key",
+// },
+// },
+// &TxnOp{
+// Check: &CheckTxnOp{
+// Verb: CheckSet,
+// Check: HealthCheck{
+// Node: "foo",
+// CheckID: "redis:a",
+// Name: "Redis Health Check",
+// Status: "passing",
+// },
+// },
+// },
+// }
+// ok, response, _, err := client.Txn().Txn(ops, nil)
+//
+// If there is a problem making the transaction request then an error will be
+// returned. Otherwise, the ok value will be true if the transaction succeeded
+// or false if it was rolled back. The response is a structured return value which
+// will have the outcome of the transaction. Its Results member will have entries
+// for each operation. For KV operations, Deleted keys will have a nil entry in the
+// results, and to save space, the Value of each key in the Results will be nil
+// unless the operation is a KVGet. If the transaction was rolled back, the Errors
+// member will have entries referencing the index of the operation that failed
+// along with an error message.
+func (t *Txn) Txn(txn TxnOps, q *QueryOptions) (bool, *TxnResponse, *QueryMeta, error) {
+ return t.c.txn(txn, q)
+}
+
+func (c *Client) txn(txn TxnOps, q *QueryOptions) (bool, *TxnResponse, *QueryMeta, error) {
+ r := c.newRequest("PUT", "/v1/txn")
+ r.setQueryOptions(q)
+
+ r.obj = txn
+ rtt, resp, err := c.doRequest(r)
+ if err != nil {
+ return false, nil, nil, err
+ }
+ defer resp.Body.Close()
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+
+ if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusConflict {
+ var txnResp TxnResponse
+ if err := decodeBody(resp, &txnResp); err != nil {
+ return false, nil, nil, err
+ }
+
+ return resp.StatusCode == http.StatusOK, &txnResp, qm, nil
+ }
+
+ var buf bytes.Buffer
+ if _, err := io.Copy(&buf, resp.Body); err != nil {
+ return false, nil, nil, fmt.Errorf("Failed to read response: %v", err)
+ }
+ return false, nil, nil, fmt.Errorf("Failed request: %s", buf.String())
+}
diff --git a/vendor/github.com/hashicorp/consul/api/watch/funcs.go b/vendor/github.com/hashicorp/consul/api/watch/funcs.go
new file mode 100644
index 0000000..cc4f333
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/watch/funcs.go
@@ -0,0 +1,321 @@
+package watch
+
+import (
+ "context"
+ "fmt"
+
+ consulapi "github.com/hashicorp/consul/api"
+)
+
+// watchFactory is a function that can create a new WatcherFunc
+// from a parameter configuration
+type watchFactory func(params map[string]interface{}) (WatcherFunc, error)
+
+// watchFuncFactory maps each type to a factory function
+var watchFuncFactory map[string]watchFactory
+
+func init() {
+ watchFuncFactory = map[string]watchFactory{
+ "key": keyWatch,
+ "keyprefix": keyPrefixWatch,
+ "services": servicesWatch,
+ "nodes": nodesWatch,
+ "service": serviceWatch,
+ "checks": checksWatch,
+ "event": eventWatch,
+ "connect_roots": connectRootsWatch,
+ "connect_leaf": connectLeafWatch,
+ "agent_service": agentServiceWatch,
+ }
+}
+
+// keyWatch is used to return a key watching function
+func keyWatch(params map[string]interface{}) (WatcherFunc, error) {
+ stale := false
+ if err := assignValueBool(params, "stale", &stale); err != nil {
+ return nil, err
+ }
+
+ var key string
+ if err := assignValue(params, "key", &key); err != nil {
+ return nil, err
+ }
+ if key == "" {
+ return nil, fmt.Errorf("Must specify a single key to watch")
+ }
+ fn := func(p *Plan) (BlockingParamVal, interface{}, error) {
+ kv := p.client.KV()
+ opts := makeQueryOptionsWithContext(p, stale)
+ defer p.cancelFunc()
+ pair, meta, err := kv.Get(key, &opts)
+ if err != nil {
+ return nil, nil, err
+ }
+ if pair == nil {
+ return WaitIndexVal(meta.LastIndex), nil, err
+ }
+ return WaitIndexVal(meta.LastIndex), pair, err
+ }
+ return fn, nil
+}
+
+// keyPrefixWatch is used to return a key prefix watching function
+func keyPrefixWatch(params map[string]interface{}) (WatcherFunc, error) {
+ stale := false
+ if err := assignValueBool(params, "stale", &stale); err != nil {
+ return nil, err
+ }
+
+ var prefix string
+ if err := assignValue(params, "prefix", &prefix); err != nil {
+ return nil, err
+ }
+ if prefix == "" {
+ return nil, fmt.Errorf("Must specify a single prefix to watch")
+ }
+ fn := func(p *Plan) (BlockingParamVal, interface{}, error) {
+ kv := p.client.KV()
+ opts := makeQueryOptionsWithContext(p, stale)
+ defer p.cancelFunc()
+ pairs, meta, err := kv.List(prefix, &opts)
+ if err != nil {
+ return nil, nil, err
+ }
+ return WaitIndexVal(meta.LastIndex), pairs, err
+ }
+ return fn, nil
+}
+
+// servicesWatch is used to watch the list of available services
+func servicesWatch(params map[string]interface{}) (WatcherFunc, error) {
+ stale := false
+ if err := assignValueBool(params, "stale", &stale); err != nil {
+ return nil, err
+ }
+
+ fn := func(p *Plan) (BlockingParamVal, interface{}, error) {
+ catalog := p.client.Catalog()
+ opts := makeQueryOptionsWithContext(p, stale)
+ defer p.cancelFunc()
+ services, meta, err := catalog.Services(&opts)
+ if err != nil {
+ return nil, nil, err
+ }
+ return WaitIndexVal(meta.LastIndex), services, err
+ }
+ return fn, nil
+}
+
+// nodesWatch is used to watch the list of available nodes
+func nodesWatch(params map[string]interface{}) (WatcherFunc, error) {
+ stale := false
+ if err := assignValueBool(params, "stale", &stale); err != nil {
+ return nil, err
+ }
+
+ fn := func(p *Plan) (BlockingParamVal, interface{}, error) {
+ catalog := p.client.Catalog()
+ opts := makeQueryOptionsWithContext(p, stale)
+ defer p.cancelFunc()
+ nodes, meta, err := catalog.Nodes(&opts)
+ if err != nil {
+ return nil, nil, err
+ }
+ return WaitIndexVal(meta.LastIndex), nodes, err
+ }
+ return fn, nil
+}
+
+// serviceWatch is used to watch a specific service for changes
+func serviceWatch(params map[string]interface{}) (WatcherFunc, error) {
+ stale := false
+ if err := assignValueBool(params, "stale", &stale); err != nil {
+ return nil, err
+ }
+
+ var (
+ service string
+ tags []string
+ )
+ if err := assignValue(params, "service", &service); err != nil {
+ return nil, err
+ }
+ if service == "" {
+ return nil, fmt.Errorf("Must specify a single service to watch")
+ }
+ if err := assignValueStringSlice(params, "tag", &tags); err != nil {
+ return nil, err
+ }
+
+ passingOnly := false
+ if err := assignValueBool(params, "passingonly", &passingOnly); err != nil {
+ return nil, err
+ }
+
+ fn := func(p *Plan) (BlockingParamVal, interface{}, error) {
+ health := p.client.Health()
+ opts := makeQueryOptionsWithContext(p, stale)
+ defer p.cancelFunc()
+ nodes, meta, err := health.ServiceMultipleTags(service, tags, passingOnly, &opts)
+ if err != nil {
+ return nil, nil, err
+ }
+ return WaitIndexVal(meta.LastIndex), nodes, err
+ }
+ return fn, nil
+}
+
+// checksWatch is used to watch the checks of a given service, or all checks in a given state
+func checksWatch(params map[string]interface{}) (WatcherFunc, error) {
+ stale := false
+ if err := assignValueBool(params, "stale", &stale); err != nil {
+ return nil, err
+ }
+
+ var service, state string
+ if err := assignValue(params, "service", &service); err != nil {
+ return nil, err
+ }
+ if err := assignValue(params, "state", &state); err != nil {
+ return nil, err
+ }
+ if service != "" && state != "" {
+ return nil, fmt.Errorf("Cannot specify service and state")
+ }
+ if service == "" && state == "" {
+ state = "any"
+ }
+
+ fn := func(p *Plan) (BlockingParamVal, interface{}, error) {
+ health := p.client.Health()
+ opts := makeQueryOptionsWithContext(p, stale)
+ defer p.cancelFunc()
+ var checks []*consulapi.HealthCheck
+ var meta *consulapi.QueryMeta
+ var err error
+ if state != "" {
+ checks, meta, err = health.State(state, &opts)
+ } else {
+ checks, meta, err = health.Checks(service, &opts)
+ }
+ if err != nil {
+ return nil, nil, err
+ }
+ return WaitIndexVal(meta.LastIndex), checks, err
+ }
+ return fn, nil
+}
+
+// eventWatch is used to watch for events, optionally filtering on name
+func eventWatch(params map[string]interface{}) (WatcherFunc, error) {
+ // The stale setting doesn't apply to events.
+
+ var name string
+ if err := assignValue(params, "name", &name); err != nil {
+ return nil, err
+ }
+
+ fn := func(p *Plan) (BlockingParamVal, interface{}, error) {
+ event := p.client.Event()
+ opts := makeQueryOptionsWithContext(p, false)
+ defer p.cancelFunc()
+ events, meta, err := event.List(name, &opts)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Prune to only the new events
+ for i := 0; i < len(events); i++ {
+ if WaitIndexVal(event.IDToIndex(events[i].ID)).Equal(p.lastParamVal) {
+ events = events[i+1:]
+ break
+ }
+ }
+ return WaitIndexVal(meta.LastIndex), events, err
+ }
+ return fn, nil
+}
+
+// connectRootsWatch is used to watch for changes to Connect Root certificates.
+func connectRootsWatch(params map[string]interface{}) (WatcherFunc, error) {
+ // We don't support stale since roots are cached locally in the agent.
+
+ fn := func(p *Plan) (BlockingParamVal, interface{}, error) {
+ agent := p.client.Agent()
+ opts := makeQueryOptionsWithContext(p, false)
+ defer p.cancelFunc()
+
+ roots, meta, err := agent.ConnectCARoots(&opts)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return WaitIndexVal(meta.LastIndex), roots, err
+ }
+ return fn, nil
+}
+
+// connectLeafWatch is used to watch for changes to Connect Leaf certificates
+// for a given local service ID.
+func connectLeafWatch(params map[string]interface{}) (WatcherFunc, error) {
+ // We don't support stale since certs are cached locally in the agent.
+
+ var serviceName string
+ if err := assignValue(params, "service", &serviceName); err != nil {
+ return nil, err
+ }
+
+ fn := func(p *Plan) (BlockingParamVal, interface{}, error) {
+ agent := p.client.Agent()
+ opts := makeQueryOptionsWithContext(p, false)
+ defer p.cancelFunc()
+
+ leaf, meta, err := agent.ConnectCALeaf(serviceName, &opts)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return WaitIndexVal(meta.LastIndex), leaf, err
+ }
+ return fn, nil
+}
+
+// agentServiceWatch is used to watch for changes to a single service instance
+// on the local agent. Note that this state is agent-local so the watch
+// mechanism uses `hash` rather than `index` for deciding whether to block.
+func agentServiceWatch(params map[string]interface{}) (WatcherFunc, error) {
+ // We don't support consistency modes since it's agent local data
+
+ var serviceID string
+ if err := assignValue(params, "service_id", &serviceID); err != nil {
+ return nil, err
+ }
+
+ fn := func(p *Plan) (BlockingParamVal, interface{}, error) {
+ agent := p.client.Agent()
+ opts := makeQueryOptionsWithContext(p, false)
+ defer p.cancelFunc()
+
+ svc, _, err := agent.Service(serviceID, &opts)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Return string ContentHash since we don't have Raft indexes to block on.
+ return WaitHashVal(svc.ContentHash), svc, err
+ }
+ return fn, nil
+}
+
+func makeQueryOptionsWithContext(p *Plan, stale bool) consulapi.QueryOptions {
+ ctx, cancel := context.WithCancel(context.Background())
+ p.setCancelFunc(cancel)
+ opts := consulapi.QueryOptions{AllowStale: stale}
+ switch param := p.lastParamVal.(type) {
+ case WaitIndexVal:
+ opts.WaitIndex = uint64(param)
+ case WaitHashVal:
+ opts.WaitHash = string(param)
+ }
+ return *opts.WithContext(ctx)
+}
diff --git a/vendor/github.com/hashicorp/consul/api/watch/plan.go b/vendor/github.com/hashicorp/consul/api/watch/plan.go
new file mode 100644
index 0000000..f3b7981
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/watch/plan.go
@@ -0,0 +1,254 @@
+package watch
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "log"
+ "reflect"
+ "time"
+
+ consulapi "github.com/hashicorp/consul/api"
+ "github.com/hashicorp/go-hclog"
+)
+
+const (
+ // retryInterval is the base retry value
+ retryInterval = 5 * time.Second
+
+ // maxBackoffTime is the maximum backoff time; it caps the exponential
+ // backoff to prevent runaway retry intervals
+ maxBackoffTime = 180 * time.Second
+
+ // Name used with hclog Logger. We do not add this to the logging package
+ // because we do not want to pull in the root consul module.
+ watchLoggerName = "watch"
+)
+
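+// Run runs the watch plan against the Consul agent at the given address,
+// using the default client configuration.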
+func (p *Plan) Run(address string) error {
+ return p.RunWithConfig(address, nil)
+}
+
+// RunWithConfig runs a watch plan using the given address and client config
+func (p *Plan) RunWithConfig(address string, conf *consulapi.Config) error {
+ logger := p.Logger
+ if logger == nil {
+ logger = newWatchLogger(p.LogOutput)
+ }
+
+ // Setup the client
+ p.address = address
+ if conf == nil {
+ conf = consulapi.DefaultConfigWithLogger(logger)
+ }
+ conf.Address = address
+ conf.Datacenter = p.Datacenter
+ conf.Token = p.Token
+ client, err := consulapi.NewClient(conf)
+ if err != nil {
+ return fmt.Errorf("Failed to connect to agent: %v", err)
+ }
+
+ return p.RunWithClientAndHclog(client, logger)
+}
+
+// RunWithClientAndHclog runs a watch plan using an external client and
+// hclog.Logger instance. Using this, the plan's Datacenter, Token and LogOutput
+// fields are ignored and the passed client is expected to be configured as
+// needed.
+func (p *Plan) RunWithClientAndHclog(client *consulapi.Client, logger hclog.Logger) error {
+ var watchLogger hclog.Logger
+ if logger == nil {
+ watchLogger = newWatchLogger(nil)
+ } else {
+ watchLogger = logger.Named(watchLoggerName)
+ }
+
+ p.client = client
+
+ // Loop until we are canceled
+ failures := 0
+OUTER:
+ for !p.shouldStop() {
+ // Invoke the handler
+ blockParamVal, result, err := p.Watcher(p)
+
+ // Check if we should terminate since the function
+ // could have blocked for a while
+ if p.shouldStop() {
+ break
+ }
+
+ // Handle an error in the watch function
+ if err != nil {
+ // Perform an exponential backoff
+ failures++
+ if blockParamVal == nil {
+ p.lastParamVal = nil
+ } else {
+ p.lastParamVal = blockParamVal.Next(p.lastParamVal)
+ }
+ retry := retryInterval * time.Duration(failures*failures)
+ if retry > maxBackoffTime {
+ retry = maxBackoffTime
+ }
+ watchLogger.Error("Watch errored", "type", p.Type, "error", err, "retry", retry)
+ select {
+ case <-time.After(retry):
+ continue OUTER
+ case <-p.stopCh:
+ return nil
+ }
+ }
+
+ // Clear the failures
+ failures = 0
+
+ // If the index is unchanged do nothing
+ if p.lastParamVal != nil && p.lastParamVal.Equal(blockParamVal) {
+ continue
+ }
+
+ // Update the index, look for change
+ oldParamVal := p.lastParamVal
+ p.lastParamVal = blockParamVal.Next(oldParamVal)
+ if oldParamVal != nil && reflect.DeepEqual(p.lastResult, result) {
+ continue
+ }
+
+ // Handle the updated result
+ p.lastResult = result
+ // If a hybrid handler exists use that
+ if p.HybridHandler != nil {
+ p.HybridHandler(blockParamVal, result)
+ } else if p.Handler != nil {
+ if idx, ok := blockParamVal.(WaitIndexVal); ok {
+ p.Handler(uint64(idx), result)
+ } else {
+ watchLogger.Error("Handler only supports index-based watches, " +
+ "but a non index-based watch was run. Skipping Handler.")
+ }
+ }
+ }
+ return nil
+}
+
+// Deprecated: Use RunWithClientAndHclog instead.
+func (p *Plan) RunWithClientAndLogger(client *consulapi.Client, logger *log.Logger) error {
+ p.client = client
+
+ // Loop until we are canceled
+ failures := 0
+OUTER:
+ for !p.shouldStop() {
+ // Invoke the handler
+ blockParamVal, result, err := p.Watcher(p)
+
+ // Check if we should terminate since the function
+ // could have blocked for a while
+ if p.shouldStop() {
+ break
+ }
+
+ // Handle an error in the watch function
+ if err != nil {
+ // Perform an exponential backoff
+ failures++
+ if blockParamVal == nil {
+ p.lastParamVal = nil
+ } else {
+ p.lastParamVal = blockParamVal.Next(p.lastParamVal)
+ }
+ retry := retryInterval * time.Duration(failures*failures)
+ if retry > maxBackoffTime {
+ retry = maxBackoffTime
+ }
+ logger.Printf("[ERR] consul.watch: Watch (type: %s) errored: %v, retry in %v",
+ p.Type, err, retry)
+ select {
+ case <-time.After(retry):
+ continue OUTER
+ case <-p.stopCh:
+ return nil
+ }
+ }
+
+ // Clear the failures
+ failures = 0
+
+ // If the index is unchanged do nothing
+ if p.lastParamVal != nil && p.lastParamVal.Equal(blockParamVal) {
+ continue
+ }
+
+ // Update the index, look for change
+ oldParamVal := p.lastParamVal
+ p.lastParamVal = blockParamVal.Next(oldParamVal)
+ if oldParamVal != nil && reflect.DeepEqual(p.lastResult, result) {
+ continue
+ }
+
+ // Handle the updated result
+ p.lastResult = result
+ // If a hybrid handler exists use that
+ if p.HybridHandler != nil {
+ p.HybridHandler(blockParamVal, result)
+ } else if p.Handler != nil {
+ if idx, ok := blockParamVal.(WaitIndexVal); ok {
+ p.Handler(uint64(idx), result)
+ } else {
+ logger.Printf("[ERR] consul.watch: Handler only supports index-based " +
+ "watches, but a non index-based watch was run. Skipping Handler.")
+ }
+ }
+ }
+ return nil
+}
+
+// Stop is used to stop running the watch plan
+func (p *Plan) Stop() {
+ p.stopLock.Lock()
+ defer p.stopLock.Unlock()
+ if p.stop {
+ return
+ }
+ p.stop = true
+ if p.cancelFunc != nil {
+ p.cancelFunc()
+ }
+ close(p.stopCh)
+}
+
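+// shouldStop reports whether Stop has been called, without blocking.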
+func (p *Plan) shouldStop() bool {
+ select {
+ case <-p.stopCh:
+ return true
+ default:
+ return false
+ }
+}
+
+func (p *Plan) setCancelFunc(cancel context.CancelFunc) {
+ p.stopLock.Lock()
+ defer p.stopLock.Unlock()
+ if p.shouldStop() {
+ // The plan is already stopped; cancel the new context immediately so the
+ // pending watcher request is aborted rather than leaked.
+ cancel()
+ return
+ }
+ p.cancelFunc = cancel
+}
+
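+// IsStopped reports whether the watch plan has been stopped.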
+func (p *Plan) IsStopped() bool {
+ p.stopLock.Lock()
+ defer p.stopLock.Unlock()
+ return p.stop
+}
+
+func newWatchLogger(output io.Writer) hclog.Logger {
+ return hclog.New(&hclog.LoggerOptions{
+ Name: watchLoggerName,
+ Output: output,
+ })
+}
diff --git a/vendor/github.com/hashicorp/consul/api/watch/watch.go b/vendor/github.com/hashicorp/consul/api/watch/watch.go
new file mode 100644
index 0000000..1dce252
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/watch/watch.go
@@ -0,0 +1,293 @@
+package watch
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "sync"
+ "time"
+
+ consulapi "github.com/hashicorp/consul/api"
+ "github.com/hashicorp/go-hclog"
+ "github.com/mitchellh/mapstructure"
+)
+
+const DefaultTimeout = 10 * time.Second
+
+// Plan is the parsed version of a watch specification. A watch provides
+// the details of a query, which generates a view into the Consul data store.
+// This view is watched for changes and a handler is invoked to take any
+// appropriate actions.
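+//
+// A minimal end-to-end sketch (the agent address, event name, and handler
+// body are illustrative):
+//
+//    plan, err := Parse(map[string]interface{}{"type": "event", "name": "deploy"})
+//    if err != nil {
+//        return err
+//    }
+//    plan.HybridHandler = func(_ BlockingParamVal, result interface{}) {
+//        fmt.Printf("new events: %v\n", result)
+//    }
+//    err = plan.Run("127.0.0.1:8500")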
+type Plan struct {
+ Datacenter string
+ Token string
+ Type string
+ HandlerType string
+ Exempt map[string]interface{}
+
+ Watcher WatcherFunc
+ // Handler is kept for backward compatibility but only supports watches based
+ // on index param. To support hash based watches, set HybridHandler instead.
+ Handler HandlerFunc
+ HybridHandler HybridHandlerFunc
+
+ Logger hclog.Logger
+ // Deprecated: use Logger
+ LogOutput io.Writer
+
+ address string
+ client *consulapi.Client
+ lastParamVal BlockingParamVal
+ lastResult interface{}
+
+ stop bool
+ stopCh chan struct{}
+ stopLock sync.Mutex
+ cancelFunc context.CancelFunc
+}
+
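+// HttpHandlerConfig is the decoded configuration for the "http" handler type.
+// As an illustrative sketch, the raw parameters decoded into it by
+// parseHttpHandlerConfig could look like:
+//
+//    "http_handler_config": map[string]interface{}{
+//        "path":    "https://localhost:8000/watch",
+//        "method":  "PUT",
+//        "timeout": "10s",
+//    }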
+type HttpHandlerConfig struct {
+ Path string `mapstructure:"path"`
+ Method string `mapstructure:"method"`
+ Timeout time.Duration `mapstructure:"-"`
+ TimeoutRaw string `mapstructure:"timeout"`
+ Header map[string][]string `mapstructure:"header"`
+ TLSSkipVerify bool `mapstructure:"tls_skip_verify"`
+}
+
+// BlockingParamVal is an interface representing the common operations needed for
+// different styles of blocking. It's used to abstract the core watch plan from
+// whether we are performing index-based or hash-based blocking.
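+//
+// For example, with index-based blocking (an illustrative sketch of the
+// semantics implemented by WaitIndexVal below):
+//
+//    next := WaitIndexVal(9).Next(WaitIndexVal(4)) // indexes differ: next == 9
+//    reset := WaitIndexVal(9).Next(next)           // index repeated: reset == 0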
+type BlockingParamVal interface {
+ // Equal returns whether the other param value should be considered equal
+ // (i.e. representing no change in the watched resource). Equal must not panic
+ // if other is nil.
+ Equal(other BlockingParamVal) bool
+
+ // Next is called when deciding which value to use on the next blocking call.
+ // It assumes the BlockingParamVal value it is called on is the most recent one
+ // returned and passes the previous one which may be nil as context. This
+ // allows types to customize logic around ordering without assuming there is
+ // an order. For example WaitIndexVal can check that the index didn't go
+ // backwards and if it did then reset to 0. Most other cases should just
+ // return themselves (the most recent value) to be used in the next request.
+ Next(previous BlockingParamVal) BlockingParamVal
+}
+
+// WaitIndexVal is a type representing a Consul index that implements
+// BlockingParamVal.
+type WaitIndexVal uint64
+
+// Equal implements BlockingParamVal
+func (idx WaitIndexVal) Equal(other BlockingParamVal) bool {
+ if otherIdx, ok := other.(WaitIndexVal); ok {
+ return idx == otherIdx
+ }
+ return false
+}
+
+// Next implements BlockingParamVal
+func (idx WaitIndexVal) Next(previous BlockingParamVal) BlockingParamVal {
+ if previous == nil {
+ return idx
+ }
+ prevIdx, ok := previous.(WaitIndexVal)
+ if ok && prevIdx == idx {
+ // This value is the same as the previous index, reset
+ return WaitIndexVal(0)
+ }
+ return idx
+}
+
+// WaitHashVal is a type representing a Consul content hash that implements
+// BlockingParamVal.
+type WaitHashVal string
+
+// Equal implements BlockingParamVal
+func (h WaitHashVal) Equal(other BlockingParamVal) bool {
+ if otherHash, ok := other.(WaitHashVal); ok {
+ return h == otherHash
+ }
+ return false
+}
+
+// Next implements BlockingParamVal
+func (h WaitHashVal) Next(previous BlockingParamVal) BlockingParamVal {
+ return h
+}
+
+// WatcherFunc is used to watch for a diff.
+type WatcherFunc func(*Plan) (BlockingParamVal, interface{}, error)
+
+// HandlerFunc is used to handle new data. It only works for index-based watches
+// (which is almost all endpoints currently) and is kept for backwards
+// compatibility until more places can make use of hash-based watches too.
+type HandlerFunc func(uint64, interface{})
+
+// HybridHandlerFunc is used to handle new data. It can support either
+// index-based or hash-based watches via the BlockingParamVal.
+type HybridHandlerFunc func(BlockingParamVal, interface{})
+
+// Parse takes a watch query and compiles it into a WatchPlan or an error
+func Parse(params map[string]interface{}) (*Plan, error) {
+ return ParseExempt(params, nil)
+}
+
+// ParseExempt takes a watch query and compiles it into a WatchPlan or an error
+// Any exempt parameters are stored in the Exempt map
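+// For example (an illustrative sketch; the "handler" key is hypothetical), a
+// caller that validates its own handler configuration can exempt it:
+//
+//    plan, err := ParseExempt(params, []string{"handler"})
+//    raw := plan.Exempt["handler"]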
+func ParseExempt(params map[string]interface{}, exempt []string) (*Plan, error) {
+ plan := &Plan{
+ stopCh: make(chan struct{}),
+ Exempt: make(map[string]interface{}),
+ }
+
+ // Parse the generic parameters
+ if err := assignValue(params, "datacenter", &plan.Datacenter); err != nil {
+ return nil, err
+ }
+ if err := assignValue(params, "token", &plan.Token); err != nil {
+ return nil, err
+ }
+ if err := assignValue(params, "type", &plan.Type); err != nil {
+ return nil, err
+ }
+ // Ensure there is a watch type
+ if plan.Type == "" {
+ return nil, fmt.Errorf("Watch type must be specified")
+ }
+
+ // Get the specific handler
+ if err := assignValue(params, "handler_type", &plan.HandlerType); err != nil {
+ return nil, err
+ }
+ switch plan.HandlerType {
+ case "http":
+ if _, ok := params["http_handler_config"]; !ok {
+ return nil, fmt.Errorf("Handler type 'http' requires 'http_handler_config' to be set")
+ }
+ config, err := parseHttpHandlerConfig(params["http_handler_config"])
+ if err != nil {
+ return nil, fmt.Errorf("Failed to parse 'http_handler_config': %v", err)
+ }
+ plan.Exempt["http_handler_config"] = config
+ delete(params, "http_handler_config")
+
+ case "script":
+ // Let the caller check for configuration in exempt parameters
+ }
+
+ // Look for a factory function
+ factory := watchFuncFactory[plan.Type]
+ if factory == nil {
+ return nil, fmt.Errorf("Unsupported watch type: %s", plan.Type)
+ }
+
+ // Get the watch func
+ fn, err := factory(params)
+ if err != nil {
+ return nil, err
+ }
+ plan.Watcher = fn
+
+ // Remove the exempt parameters
+ if len(exempt) > 0 {
+ for _, ex := range exempt {
+ val, ok := params[ex]
+ if ok {
+ plan.Exempt[ex] = val
+ delete(params, ex)
+ }
+ }
+ }
+
+ // Ensure all parameters are consumed
+ if len(params) != 0 {
+ var bad []string
+ for key := range params {
+ bad = append(bad, key)
+ }
+ return nil, fmt.Errorf("Invalid parameters: %v", bad)
+ }
+ return plan, nil
+}
+
+// assignValue is used to extract a value ensuring it is a string
+func assignValue(params map[string]interface{}, name string, out *string) error {
+ if raw, ok := params[name]; ok {
+ val, ok := raw.(string)
+ if !ok {
+ return fmt.Errorf("Expecting %s to be a string", name)
+ }
+ *out = val
+ delete(params, name)
+ }
+ return nil
+}
+
+// assignValueBool is used to extract a value ensuring it is a bool
+func assignValueBool(params map[string]interface{}, name string, out *bool) error {
+ if raw, ok := params[name]; ok {
+ val, ok := raw.(bool)
+ if !ok {
+ return fmt.Errorf("Expecting %s to be a boolean", name)
+ }
+ *out = val
+ delete(params, name)
+ }
+ return nil
+}
+
+// assignValueStringSlice is used to extract a value ensuring it is either a string or a slice of strings
+func assignValueStringSlice(params map[string]interface{}, name string, out *[]string) error {
+ if raw, ok := params[name]; ok {
+ var tmp []string
+ switch val := raw.(type) {
+ case string:
+ tmp = []string{val}
+ case []string:
+ tmp = make([]string, len(val))
+ copy(tmp, val)
+ case []interface{}:
+ tmp = make([]string, len(val))
+ for i, v := range val {
+ if s, ok := v.(string); ok {
+ tmp[i] = s
+ } else {
+ return fmt.Errorf("Index %d of %s expected to be string", i, name)
+ }
+ }
+ default:
+ return fmt.Errorf("Expecting %s to be a string or []string", name)
+ }
+ *out = tmp
+ delete(params, name)
+ }
+ return nil
+}
+
+// Parse the 'http_handler_config' parameters
+func parseHttpHandlerConfig(configParams interface{}) (*HttpHandlerConfig, error) {
+ var config HttpHandlerConfig
+ if err := mapstructure.Decode(configParams, &config); err != nil {
+ return nil, err
+ }
+
+ if config.Path == "" {
+ return nil, fmt.Errorf("Requires 'path' to be set")
+ }
+ if config.Method == "" {
+ config.Method = "POST"
+ }
+ if config.TimeoutRaw == "" {
+ config.Timeout = DefaultTimeout
+ } else if timeout, err := time.ParseDuration(config.TimeoutRaw); err != nil {
+ return nil, fmt.Errorf("Failed to parse timeout: %v", err)
+ } else {
+ config.Timeout = timeout
+ }
+
+ return &config, nil
+}
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE
new file mode 100644
index 0000000..e87a115
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/hashicorp/go-cleanhttp/README.md
new file mode 100644
index 0000000..036e531
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cleanhttp/README.md
@@ -0,0 +1,30 @@
+# cleanhttp
+
+Functions for accessing "clean" Go http.Client values
+
+-------------
+
+The Go standard library contains a default `http.Client` called
+`http.DefaultClient`. It is a common idiom in Go code to start with
+`http.DefaultClient` and tweak it as necessary, and in fact, this is
+encouraged; from the `http` package documentation:
+
+> The Client's Transport typically has internal state (cached TCP connections),
+so Clients should be reused instead of created as needed. Clients are safe for
+concurrent use by multiple goroutines.
+
+Unfortunately, this is a shared value, and it is not uncommon for libraries to
+assume that they are free to modify it at will. With enough dependencies, it
+can be very easy to encounter strange problems and race conditions due to
+manipulation of this shared value across libraries and goroutines (clients are
+safe for concurrent use, but writing values to the client struct itself is not
+protected).
+
+Making things worse is the fact that a bare `http.Client` will use a default
+`http.Transport` called `http.DefaultTransport`, which is another global value
+that behaves the same way. So it is not simply enough to replace
+`http.DefaultClient` with `&http.Client{}`.
+
+This repository provides some simple functions to get a "clean" `http.Client`
+-- one that uses the same default values as the Go standard library, but
+returns a client that does not share any state with other clients.
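+
+As a brief usage sketch (the target URL is illustrative):
+
+```go
+resp, err := cleanhttp.DefaultClient().Get("https://www.hashicorp.com/")
+if err != nil {
+    // handle the error
+    return
+}
+defer resp.Body.Close()
+```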
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
new file mode 100644
index 0000000..8d306bf
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
@@ -0,0 +1,57 @@
+package cleanhttp
+
+import (
+ "net"
+ "net/http"
+ "runtime"
+ "time"
+)
+
+// DefaultTransport returns a new http.Transport with similar default values to
+// http.DefaultTransport, but with idle connections and keepalives disabled.
+func DefaultTransport() *http.Transport {
+ transport := DefaultPooledTransport()
+ transport.DisableKeepAlives = true
+ transport.MaxIdleConnsPerHost = -1
+ return transport
+}
+
+// DefaultPooledTransport returns a new http.Transport with similar default
+// values to http.DefaultTransport. Do not use this for transient transports as
+// it can leak file descriptors over time. Only use this for transports that
+// will be re-used for the same host(s).
+func DefaultPooledTransport() *http.Transport {
+ transport := &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }).DialContext,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1,
+ }
+ return transport
+}
+
+// DefaultClient returns a new http.Client with similar default values to
+// http.Client, but with a non-shared Transport, idle connections disabled, and
+// keepalives disabled.
+func DefaultClient() *http.Client {
+ return &http.Client{
+ Transport: DefaultTransport(),
+ }
+}
+
+// DefaultPooledClient returns a new http.Client with similar default values to
+// http.Client, but with a shared Transport. Do not use this function for
+// transient clients as it can leak file descriptors over time. Only use this
+// for clients that will be re-used for the same host(s).
+func DefaultPooledClient() *http.Client {
+ return &http.Client{
+ Transport: DefaultPooledTransport(),
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/doc.go b/vendor/github.com/hashicorp/go-cleanhttp/doc.go
new file mode 100644
index 0000000..0584109
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cleanhttp/doc.go
@@ -0,0 +1,20 @@
+// Package cleanhttp offers convenience utilities for acquiring "clean"
+// http.Transport and http.Client structs.
+//
+// Values set on http.DefaultClient and http.DefaultTransport affect all
+// callers. This can have detrimental effects, especially in TLS contexts,
+// where client or root certificates set to talk to multiple endpoints can end
+// up displacing each other, leading to hard-to-debug issues. This package
+// provides non-shared http.Client and http.Transport structs to ensure that
+// the configuration will not be overwritten by other parts of the application
+// or dependencies.
+//
+// The DefaultClient and DefaultTransport functions disable idle connections
+// and keepalives. Without ensuring that idle connections are closed before
+// garbage collection, short-term clients/transports can leak file descriptors,
+// eventually leading to "too many open files" errors. If you will be
+// connecting to the same hosts repeatedly from the same client, you can use
+// DefaultPooledClient to receive a client that has connection pooling
+// semantics similar to http.DefaultClient.
+//
+package cleanhttp
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/go.mod b/vendor/github.com/hashicorp/go-cleanhttp/go.mod
new file mode 100644
index 0000000..310f075
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cleanhttp/go.mod
@@ -0,0 +1 @@
+module github.com/hashicorp/go-cleanhttp
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go
new file mode 100644
index 0000000..3c845dc
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go
@@ -0,0 +1,48 @@
+package cleanhttp
+
+import (
+ "net/http"
+ "strings"
+ "unicode"
+)
+
+// HandlerInput provides input options to cleanhttp's handlers
+type HandlerInput struct {
+ ErrStatus int
+}
+
+// PrintablePathCheckHandler is a middleware that ensures the request path
+// contains only printable runes.
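+//
+// A minimal usage sketch (the mux is illustrative):
+//
+//    mux := http.NewServeMux()
+//    http.ListenAndServe(":8080", PrintablePathCheckHandler(mux, nil))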
+func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler {
+ // Nil-check on input to make it optional
+ if input == nil {
+ input = &HandlerInput{
+ ErrStatus: http.StatusBadRequest,
+ }
+ }
+
+ // Default to http.StatusBadRequest on error
+ if input.ErrStatus == 0 {
+ input.ErrStatus = http.StatusBadRequest
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r != nil {
+ // Check URL path for non-printable characters
+ idx := strings.IndexFunc(r.URL.Path, func(c rune) bool {
+ return !unicode.IsPrint(c)
+ })
+
+ if idx != -1 {
+ w.WriteHeader(input.ErrStatus)
+ return
+ }
+
+ if next != nil {
+ next.ServeHTTP(w, r)
+ }
+ }
+ })
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/.gitignore b/vendor/github.com/hashicorp/go-hclog/.gitignore
new file mode 100644
index 0000000..42cc410
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/.gitignore
@@ -0,0 +1 @@
+.idea*
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/go-hclog/LICENSE b/vendor/github.com/hashicorp/go-hclog/LICENSE
new file mode 100644
index 0000000..abaf1e4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2017 HashiCorp
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/hashicorp/go-hclog/README.md b/vendor/github.com/hashicorp/go-hclog/README.md
new file mode 100644
index 0000000..5d56f4b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/README.md
@@ -0,0 +1,148 @@
+# go-hclog
+
+[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
+
+[godocs]: https://godoc.org/github.com/hashicorp/go-hclog
+
+`go-hclog` is a package for Go that provides a simple key/value logging
+interface for use in development and production environments.
+
+Unlike the standard library `log` package, it provides logging levels, so
+output can be filtered down to the desired verbosity.
+
+It provides `Printf` style logging of values via `hclog.Fmt()`.
+
+It provides a human-readable output mode for use in development as well as a
+JSON output mode for production.
+
+## Stability Note
+
+While this library is fully open source and HashiCorp will be maintaining it
+(since we are and will be making extensive use of it), the API and output
+format is subject to minor changes as we fully bake and vet it in our projects.
+This notice will be removed once it's fully integrated into our major projects
+and no further changes are anticipated.
+
+## Installation and Docs
+
+Install using `go get github.com/hashicorp/go-hclog`.
+
+Full documentation is available at
+http://godoc.org/github.com/hashicorp/go-hclog
+
+## Usage
+
+### Use the global logger
+
+```go
+hclog.Default().Info("hello world")
+```
+
+```text
+2017-07-05T16:15:55.167-0700 [INFO ] hello world
+```
+
+(Note timestamps are removed in future examples for brevity.)
+
+### Create a new logger
+
+```go
+appLogger := hclog.New(&hclog.LoggerOptions{
+ Name: "my-app",
+ Level: hclog.LevelFromString("DEBUG"),
+})
+```
+
+### Emit an Info level message with 2 key/value pairs
+
+```go
+input := "5.5"
+_, err := strconv.ParseInt(input, 10, 32)
+if err != nil {
+ appLogger.Info("Invalid input for ParseInt", "input", input, "error", err)
+}
+```
+
+```text
+... [INFO ] my-app: Invalid input for ParseInt: input=5.5 error="strconv.ParseInt: parsing "5.5": invalid syntax"
+```
+
+### Create a new Logger for a major subsystem
+
+```go
+subsystemLogger := appLogger.Named("transport")
+subsystemLogger.Info("we are transporting something")
+```
+
+```text
+... [INFO ] my-app.transport: we are transporting something
+```
+
+Notice that logs emitted by `subsystemLogger` contain `my-app.transport`,
+reflecting both the application and subsystem names.
+
+### Create a new Logger with fixed key/value pairs
+
+Using `With()` will include a specific key-value pair in all messages emitted
+by that logger.
+
+```go
+requestID := "5fb446b6-6eba-821d-df1b-cd7501b6a363"
+requestLogger := subsystemLogger.With("request", requestID)
+requestLogger.Info("we are transporting a request")
+```
+
+```text
+... [INFO ] my-app.transport: we are transporting a request: request=5fb446b6-6eba-821d-df1b-cd7501b6a363
+```
+
+This allows sub Loggers to be context specific without having to thread that
+into all the callers.
+
+### Using `hclog.Fmt()`
+
+```go
+totalBandwidth := 200
+appLogger.Info("total bandwidth exceeded", "bandwidth", hclog.Fmt("%d GB/s", totalBandwidth))
+```
+
+```text
+... [INFO ] my-app: total bandwidth exceeded: bandwidth="200 GB/s"
+```
+
+### Use this with code that uses the standard library logger
+
+If you want to use the standard library's `log.Logger` interface you can wrap
+`hclog.Logger` by calling the `StandardLogger()` method. This allows you to use
+it with the familiar `Println()`, `Printf()`, etc. For example:
+
+```go
+stdLogger := appLogger.StandardLogger(&hclog.StandardLoggerOptions{
+ InferLevels: true,
+})
+// Printf() is provided by stdlib log.Logger interface, not hclog.Logger
+stdLogger.Printf("[DEBUG] %+v", stdLogger)
+```
+
+```text
+... [DEBUG] my-app: &{mu:{state:0 sema:0} prefix: flag:0 out:0xc42000a0a0 buf:[]}
+```
+
+Alternatively, you may configure the system-wide logger:
+
+```go
+// log the standard logger from 'import "log"'
+log.SetOutput(appLogger.StandardWriter(&hclog.StandardLoggerOptions{InferLevels: true}))
+log.SetPrefix("")
+log.SetFlags(0)
+
+log.Printf("[DEBUG] %d", 42)
+```
+
+```text
+... [DEBUG] my-app: 42
+```
+
+Notice that if `appLogger` is initialized with the `INFO` log level _and_ you
+specify `InferLevels: true`, you will not see any output here. You must change
+`appLogger` to `DEBUG` to see output. See the docs for more information.
diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_unix.go b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go
new file mode 100644
index 0000000..44aa9bf
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go
@@ -0,0 +1,27 @@
+// +build !windows
+
+package hclog
+
+import (
+ "github.com/mattn/go-isatty"
+)
+
+// setColorization will mutate the values of this logger
+// to appropriately configure colorization options. On non-Windows
+// systems it disables color when the writer is not a terminal.
+func (l *intLogger) setColorization(opts *LoggerOptions) {
+ switch opts.Color {
+ case ColorOff:
+ fallthrough
+ case ForceColor:
+ return
+ case AutoColor:
+ fi := l.checkWriterIsFile()
+ isUnixTerm := isatty.IsTerminal(fi.Fd())
+ isCygwinTerm := isatty.IsCygwinTerminal(fi.Fd())
+ isTerm := isUnixTerm || isCygwinTerm
+ if !isTerm {
+ l.writer.color = ColorOff
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_windows.go b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go
new file mode 100644
index 0000000..23486b6
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go
@@ -0,0 +1,33 @@
+// +build windows
+
+package hclog
+
+import (
+ "os"
+
+ colorable "github.com/mattn/go-colorable"
+ "github.com/mattn/go-isatty"
+)
+
+// setColorization will mutate the values of this logger
+// to appropriately configure colorization options. It provides
+// a wrapper to the output stream on Windows systems.
+func (l *intLogger) setColorization(opts *LoggerOptions) {
+ switch opts.Color {
+ case ColorOff:
+ return
+ case ForceColor:
+ fi := l.checkWriterIsFile()
+ l.writer.w = colorable.NewColorable(fi)
+ case AutoColor:
+ fi := l.checkWriterIsFile()
+ isUnixTerm := isatty.IsTerminal(os.Stdout.Fd())
+ isCygwinTerm := isatty.IsCygwinTerminal(os.Stdout.Fd())
+ isTerm := isUnixTerm || isCygwinTerm
+ if !isTerm {
+ l.writer.color = ColorOff
+ return
+ }
+ l.writer.w = colorable.NewColorable(fi)
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/context.go b/vendor/github.com/hashicorp/go-hclog/context.go
new file mode 100644
index 0000000..7815f50
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/context.go
@@ -0,0 +1,38 @@
+package hclog
+
+import (
+ "context"
+)
+
+// WithContext inserts a logger into the context and is retrievable
+// with FromContext. The optional args can be set with the same syntax as
+// Logger.With to set fields on the inserted logger. This will not modify
+// the logger argument in-place.
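+//
+// A typical round trip looks like (names are illustrative):
+//
+//    ctx := WithContext(context.Background(), logger, "request_id", reqID)
+//    FromContext(ctx).Info("handling request")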
+func WithContext(ctx context.Context, logger Logger, args ...interface{}) context.Context {
+ // While we could call logger.With even with zero args, we have this
+ // check to avoid unnecessary allocations around creating a copy of a
+ // logger.
+ if len(args) > 0 {
+ logger = logger.With(args...)
+ }
+
+ return context.WithValue(ctx, contextKey, logger)
+}
+
+// FromContext returns a logger from the context. This will return L()
+// (the default logger) if no logger is found in the context. Therefore,
+// this will never return a nil value.
+func FromContext(ctx context.Context) Logger {
+ logger, _ := ctx.Value(contextKey).(Logger)
+ if logger == nil {
+ return L()
+ }
+
+ return logger
+}
+
+// Unexported new type so that our context key never collides with another.
+type contextKeyType struct{}
+
+// contextKey is the key used for the context to store the logger.
+var contextKey = contextKeyType{}
diff --git a/vendor/github.com/hashicorp/go-hclog/global.go b/vendor/github.com/hashicorp/go-hclog/global.go
new file mode 100644
index 0000000..22ebc57
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/global.go
@@ -0,0 +1,62 @@
+package hclog
+
+import (
+ "sync"
+)
+
+var (
+ protect sync.Once
+ def Logger
+
+ // DefaultOptions is used to create the Default logger. These are read
+ // only when the Default logger is created, so set them as soon as the
+ // process starts.
+ DefaultOptions = &LoggerOptions{
+ Level: DefaultLevel,
+ Output: DefaultOutput,
+ }
+)
+
+// Default returns a globally held logger. This can be a good starting
+// place, and then you can use .With() and .Named() to create sub-loggers
+// to be used in more specific contexts.
+// The value of the Default logger can be set via SetDefault() or by
+// changing the options in DefaultOptions.
+//
+// This method is goroutine safe, returning a global from memory, but
+// care should be taken if SetDefault() is called at random times
+// in the program as that may result in race conditions and an unexpected
+// Logger being returned.
+func Default() Logger {
+ protect.Do(func() {
+ // If SetDefault was used before Default() was called, we need to
+ // detect that here.
+ if def == nil {
+ def = New(DefaultOptions)
+ }
+ })
+
+ return def
+}
+
+// L is a short alias for Default().
+func L() Logger {
+ return Default()
+}
+
+// SetDefault changes the logger to be returned by Default() and L()
+// to the one given. This allows packages to use the default logger
+// and have higher level packages change it to match the execution
+// environment. It returns any old default if there is one.
+//
+// NOTE: This is expected to be called early in the program to setup
+// a default logger. As such, it does not attempt to make itself
+// not racy with regard to the value of the default logger. Ergo
+// if it is called in goroutines, you may experience race conditions
+// with other goroutines retrieving the default logger. Basically,
+// don't do that.
+func SetDefault(log Logger) Logger {
+ old := def
+ def = log
+ return old
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/go.mod b/vendor/github.com/hashicorp/go-hclog/go.mod
new file mode 100644
index 0000000..b6698c0
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/go.mod
@@ -0,0 +1,12 @@
+module github.com/hashicorp/go-hclog
+
+require (
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/fatih/color v1.7.0
+ github.com/mattn/go-colorable v0.1.4
+ github.com/mattn/go-isatty v0.0.10
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/stretchr/testify v1.2.2
+)
+
+go 1.13
diff --git a/vendor/github.com/hashicorp/go-hclog/go.sum b/vendor/github.com/hashicorp/go-hclog/go.sum
new file mode 100644
index 0000000..3a656df
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/go.sum
@@ -0,0 +1,18 @@
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU=
+golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go
new file mode 100644
index 0000000..7e86dc8
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go
@@ -0,0 +1,230 @@
+package hclog
+
+import (
+ "io"
+ "log"
+ "sync"
+ "sync/atomic"
+)
+
+var _ Logger = &interceptLogger{}
+
+type interceptLogger struct {
+ Logger
+
+ mu *sync.Mutex
+ sinkCount *int32
+ Sinks map[SinkAdapter]struct{}
+}
+
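+// NewInterceptLogger returns an InterceptLogger whose messages are emitted by
+// the wrapped Logger and also fanned out to every registered sink. A brief
+// sketch (sink is an illustrative SinkAdapter value):
+//
+//    intercept := NewInterceptLogger(&LoggerOptions{Name: "app"})
+//    intercept.RegisterSink(sink)
+//    intercept.Info("hello") // goes to the logger output and to sink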
+func NewInterceptLogger(opts *LoggerOptions) InterceptLogger {
+ intercept := &interceptLogger{
+ Logger: New(opts),
+ mu: new(sync.Mutex),
+ sinkCount: new(int32),
+ Sinks: make(map[SinkAdapter]struct{}),
+ }
+
+ atomic.StoreInt32(intercept.sinkCount, 0)
+
+ return intercept
+}
+
+func (i *interceptLogger) Log(level Level, msg string, args ...interface{}) {
+ i.Logger.Log(level, msg, args...)
+ if atomic.LoadInt32(i.sinkCount) == 0 {
+ return
+ }
+
+ i.mu.Lock()
+ defer i.mu.Unlock()
+ for s := range i.Sinks {
+ s.Accept(i.Name(), level, msg, i.retrieveImplied(args...)...)
+ }
+}
+
+// Emit the message and args at TRACE level to log and sinks
+func (i *interceptLogger) Trace(msg string, args ...interface{}) {
+ i.Logger.Trace(msg, args...)
+ if atomic.LoadInt32(i.sinkCount) == 0 {
+ return
+ }
+
+ i.mu.Lock()
+ defer i.mu.Unlock()
+ for s := range i.Sinks {
+ s.Accept(i.Name(), Trace, msg, i.retrieveImplied(args...)...)
+ }
+}
+
+// Emit the message and args at DEBUG level to log and sinks
+func (i *interceptLogger) Debug(msg string, args ...interface{}) {
+ i.Logger.Debug(msg, args...)
+ if atomic.LoadInt32(i.sinkCount) == 0 {
+ return
+ }
+
+ i.mu.Lock()
+ defer i.mu.Unlock()
+ for s := range i.Sinks {
+ s.Accept(i.Name(), Debug, msg, i.retrieveImplied(args...)...)
+ }
+}
+
+// Emit the message and args at INFO level to log and sinks
+func (i *interceptLogger) Info(msg string, args ...interface{}) {
+ i.Logger.Info(msg, args...)
+ if atomic.LoadInt32(i.sinkCount) == 0 {
+ return
+ }
+
+ i.mu.Lock()
+ defer i.mu.Unlock()
+ for s := range i.Sinks {
+ s.Accept(i.Name(), Info, msg, i.retrieveImplied(args...)...)
+ }
+}
+
+// Emit the message and args at WARN level to log and sinks
+func (i *interceptLogger) Warn(msg string, args ...interface{}) {
+ i.Logger.Warn(msg, args...)
+ if atomic.LoadInt32(i.sinkCount) == 0 {
+ return
+ }
+
+ i.mu.Lock()
+ defer i.mu.Unlock()
+ for s := range i.Sinks {
+ s.Accept(i.Name(), Warn, msg, i.retrieveImplied(args...)...)
+ }
+}
+
+// Emit the message and args at ERROR level to log and sinks
+func (i *interceptLogger) Error(msg string, args ...interface{}) {
+ i.Logger.Error(msg, args...)
+ if atomic.LoadInt32(i.sinkCount) == 0 {
+ return
+ }
+
+ i.mu.Lock()
+ defer i.mu.Unlock()
+ for s := range i.Sinks {
+ s.Accept(i.Name(), Error, msg, i.retrieveImplied(args...)...)
+ }
+}
+
+func (i *interceptLogger) retrieveImplied(args ...interface{}) []interface{} {
+ top := i.Logger.ImpliedArgs()
+
+ cp := make([]interface{}, len(top)+len(args))
+ copy(cp, top)
+ copy(cp[len(top):], args)
+
+ return cp
+}
+
+// Create a new sub-Logger with a name descending from the current name.
+// This is used to create a subsystem specific Logger.
+// Registered sinks will subscribe to these messages as well.
+func (i *interceptLogger) Named(name string) Logger {
+ var sub interceptLogger
+
+ sub = *i
+
+ sub.Logger = i.Logger.Named(name)
+
+ return &sub
+}
+
+// Create a new sub-Logger with an explicit name. This ignores the current
+// name. This is used to create a standalone logger that doesn't fall
+// within the normal hierarchy. Registered sinks will subscribe
+// to these messages as well.
+func (i *interceptLogger) ResetNamed(name string) Logger {
+ var sub interceptLogger
+
+ sub = *i
+
+ sub.Logger = i.Logger.ResetNamed(name)
+
+ return &sub
+}
+
+// Create a new sub-Logger with a name descending from the current name.
+// This is used to create a subsystem specific Logger.
+// Registered sinks will subscribe to these messages as well.
+func (i *interceptLogger) NamedIntercept(name string) InterceptLogger {
+ var sub interceptLogger
+
+ sub = *i
+
+ sub.Logger = i.Logger.Named(name)
+
+ return &sub
+}
+
+// Create a new sub-Logger with an explicit name. This ignores the current
+// name. This is used to create a standalone logger that doesn't fall
+// within the normal hierarchy. Registered sinks will subscribe
+// to these messages as well.
+func (i *interceptLogger) ResetNamedIntercept(name string) InterceptLogger {
+ var sub interceptLogger
+
+ sub = *i
+
+ sub.Logger = i.Logger.ResetNamed(name)
+
+ return &sub
+}
+
+// Return a sub-Logger for which every emitted log message will contain
+// the given key/value pairs. This is used to create a context specific
+// Logger.
+func (i *interceptLogger) With(args ...interface{}) Logger {
+ var sub interceptLogger
+
+ sub = *i
+
+ sub.Logger = i.Logger.With(args...)
+
+ return &sub
+}
+
+// RegisterSink attaches a SinkAdapter to the interceptLogger's sinks.
+func (i *interceptLogger) RegisterSink(sink SinkAdapter) {
+ i.mu.Lock()
+ defer i.mu.Unlock()
+
+ i.Sinks[sink] = struct{}{}
+
+ atomic.AddInt32(i.sinkCount, 1)
+}
+
+// DeregisterSink removes a SinkAdapter from the interceptLogger's sinks.
+func (i *interceptLogger) DeregisterSink(sink SinkAdapter) {
+ i.mu.Lock()
+ defer i.mu.Unlock()
+
+ delete(i.Sinks, sink)
+
+ atomic.AddInt32(i.sinkCount, -1)
+}
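+
+// A minimal sketch of sink registration (NewInterceptLogger is assumed to be
+// available in this package version; other names are illustrative). The root
+// logger stays at INFO while the TRACE-level sink still receives debug lines:
+//
+//    il := NewInterceptLogger(&LoggerOptions{Level: Info})
+//    sink := NewSinkAdapter(&LoggerOptions{Level: Trace})
+//    il.RegisterSink(sink)
+//    defer il.DeregisterSink(sink)
+//    il.Debug("suppressed on the root logger, delivered to the sink")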
+
+// Create a *log.Logger that will send its data through this Logger. This
+// allows packages that expect to be using the standard library to log to
+// actually use this logger, which will also send to any registered sinks.
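+//
+// For example (a sketch, with il an InterceptLogger):
+//
+//    std := il.StandardLoggerIntercept(&StandardLoggerOptions{InferLevels: true})
+//    std.Println("[WARN] disk space low") // re-emitted at WARN, sinks included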
+func (l *interceptLogger) StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger {
+ if opts == nil {
+ opts = &StandardLoggerOptions{}
+ }
+
+ return log.New(l.StandardWriterIntercept(opts), "", 0)
+}
+
+func (l *interceptLogger) StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer {
+ return &stdlogAdapter{
+ log: l,
+ inferLevels: opts.InferLevels,
+ forceLevel: opts.ForceLevel,
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go
new file mode 100644
index 0000000..5d76ee3
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go
@@ -0,0 +1,648 @@
+package hclog
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "reflect"
+ "regexp"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/fatih/color"
+)
+
+// TimeFormat is the time format to use for logging. This is a version of
+// RFC3339 that contains millisecond precision.
+const TimeFormat = "2006-01-02T15:04:05.000Z0700"
+
+// errJsonUnsupportedTypeMsg is included in log JSON entries when an argument
+// cannot be serialized to JSON.
+const errJsonUnsupportedTypeMsg = "logging contained values that don't serialize to json"
+
+var (
+ _levelToBracket = map[Level]string{
+ Debug: "[DEBUG]",
+ Trace: "[TRACE]",
+ Info: "[INFO] ",
+ Warn: "[WARN] ",
+ Error: "[ERROR]",
+ }
+
+ _levelToColor = map[Level]*color.Color{
+ Debug: color.New(color.FgHiWhite),
+ Trace: color.New(color.FgHiGreen),
+ Info: color.New(color.FgHiBlue),
+ Warn: color.New(color.FgHiYellow),
+ Error: color.New(color.FgHiRed),
+ }
+)
+
+// Make sure that intLogger is a Logger
+var _ Logger = &intLogger{}
+
+// intLogger is an internal logger implementation. Internal in that it is
+// defined entirely by this package.
+type intLogger struct {
+ json bool
+ caller bool
+ name string
+ timeFormat string
+
+ // This is a pointer so that it's shared by any derived loggers, since
+ // those derived loggers share the bufio.Writer as well.
+ mutex *sync.Mutex
+ writer *writer
+ level *int32
+
+ implied []interface{}
+}
+
+// New returns a configured logger.
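+//
+// A minimal usage sketch (option values are illustrative):
+//
+//    logger := New(&LoggerOptions{
+//        Name:  "app",
+//        Level: Debug,
+//    })
+//    logger.Debug("starting", "workers", 4)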
+func New(opts *LoggerOptions) Logger {
+ return newLogger(opts)
+}
+
+// NewSinkAdapter returns a SinkAdapter with configured settings
+// defined by LoggerOptions
+func NewSinkAdapter(opts *LoggerOptions) SinkAdapter {
+ return newLogger(opts)
+}
+
+func newLogger(opts *LoggerOptions) *intLogger {
+ if opts == nil {
+ opts = &LoggerOptions{}
+ }
+
+ output := opts.Output
+ if output == nil {
+ output = DefaultOutput
+ }
+
+ level := opts.Level
+ if level == NoLevel {
+ level = DefaultLevel
+ }
+
+ mutex := opts.Mutex
+ if mutex == nil {
+ mutex = new(sync.Mutex)
+ }
+
+ l := &intLogger{
+ json: opts.JSONFormat,
+ caller: opts.IncludeLocation,
+ name: opts.Name,
+ timeFormat: TimeFormat,
+ mutex: mutex,
+ writer: newWriter(output, opts.Color),
+ level: new(int32),
+ }
+
+ l.setColorization(opts)
+
+ if opts.TimeFormat != "" {
+ l.timeFormat = opts.TimeFormat
+ }
+
+ atomic.StoreInt32(l.level, int32(level))
+
+ return l
+}
+
+// Log a message and a set of key/value pairs if the given level is at
+// or more severe than the threshold configured in the Logger.
+func (l *intLogger) log(name string, level Level, msg string, args ...interface{}) {
+ if level < Level(atomic.LoadInt32(l.level)) {
+ return
+ }
+
+ t := time.Now()
+
+ l.mutex.Lock()
+ defer l.mutex.Unlock()
+
+ if l.json {
+ l.logJSON(t, name, level, msg, args...)
+ } else {
+ l.logPlain(t, name, level, msg, args...)
+ }
+
+ l.writer.Flush(level)
+}
+
+// Clean up a path by returning only the last two segments of the path.
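+// For example, "a/b/c/file.go" becomes "c/file.go".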
+func trimCallerPath(path string) string {
+ // lovely borrowed from zap
+ // nb. To make sure we trim the path correctly on Windows too, we
+ // counter-intuitively need to use '/' and *not* os.PathSeparator here,
+ // because the path given originates from Go stdlib, specifically
+ // runtime.Caller() which (as of Mar/17) returns forward slashes even on
+ // Windows.
+ //
+ // See https://github.com/golang/go/issues/3335
+ // and https://github.com/golang/go/issues/18151
+ //
+ // for discussion on the issue on Go side.
+
+ // Find the last separator.
+ idx := strings.LastIndexByte(path, '/')
+ if idx == -1 {
+ return path
+ }
+
+ // Find the penultimate separator.
+ idx = strings.LastIndexByte(path[:idx], '/')
+ if idx == -1 {
+ return path
+ }
+
+ return path[idx+1:]
+}
+
+var logImplFile = regexp.MustCompile(`github.com/hashicorp/go-hclog/.+logger.go$`)
+
+// Non-JSON logging format function
+func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, args ...interface{}) {
+ l.writer.WriteString(t.Format(l.timeFormat))
+ l.writer.WriteByte(' ')
+
+ s, ok := _levelToBracket[level]
+ if ok {
+ l.writer.WriteString(s)
+ } else {
+ l.writer.WriteString("[?????]")
+ }
+
+ offset := 3
+ if l.caller {
+ // Check if the caller is inside our package and inside
+ // a logger implementation file
+ if _, file, _, ok := runtime.Caller(3); ok {
+ match := logImplFile.MatchString(file)
+ if match {
+ offset = 4
+ }
+ }
+
+ if _, file, line, ok := runtime.Caller(offset); ok {
+ l.writer.WriteByte(' ')
+ l.writer.WriteString(trimCallerPath(file))
+ l.writer.WriteByte(':')
+ l.writer.WriteString(strconv.Itoa(line))
+ l.writer.WriteByte(':')
+ }
+ }
+
+ l.writer.WriteByte(' ')
+
+ if name != "" {
+ l.writer.WriteString(name)
+ l.writer.WriteString(": ")
+ }
+
+ l.writer.WriteString(msg)
+
+ args = append(l.implied, args...)
+
+ var stacktrace CapturedStacktrace
+
+ if len(args) > 0 {
+ if len(args)%2 != 0 {
+ cs, ok := args[len(args)-1].(CapturedStacktrace)
+ if ok {
+ args = args[:len(args)-1]
+ stacktrace = cs
+ } else {
+ extra := args[len(args)-1]
+ args = append(args[:len(args)-1], MissingKey, extra)
+ }
+ }
+
+ l.writer.WriteByte(':')
+
+ FOR:
+ for i := 0; i < len(args); i = i + 2 {
+ var (
+ val string
+ raw bool
+ )
+
+ switch st := args[i+1].(type) {
+ case string:
+ val = st
+ case int:
+ val = strconv.FormatInt(int64(st), 10)
+ case int64:
+ val = strconv.FormatInt(int64(st), 10)
+ case int32:
+ val = strconv.FormatInt(int64(st), 10)
+ case int16:
+ val = strconv.FormatInt(int64(st), 10)
+ case int8:
+ val = strconv.FormatInt(int64(st), 10)
+ case uint:
+ val = strconv.FormatUint(uint64(st), 10)
+ case uint64:
+ val = strconv.FormatUint(uint64(st), 10)
+ case uint32:
+ val = strconv.FormatUint(uint64(st), 10)
+ case uint16:
+ val = strconv.FormatUint(uint64(st), 10)
+ case uint8:
+ val = strconv.FormatUint(uint64(st), 10)
+ case CapturedStacktrace:
+ stacktrace = st
+ continue FOR
+ case Format:
+ val = fmt.Sprintf(st[0].(string), st[1:]...)
+ default:
+ v := reflect.ValueOf(st)
+ if v.Kind() == reflect.Slice {
+ val = l.renderSlice(v)
+ raw = true
+ } else {
+ val = fmt.Sprintf("%v", st)
+ }
+ }
+
+ l.writer.WriteByte(' ')
+ switch st := args[i].(type) {
+ case string:
+ l.writer.WriteString(st)
+ default:
+ l.writer.WriteString(fmt.Sprintf("%s", st))
+ }
+ l.writer.WriteByte('=')
+
+ if !raw && strings.ContainsAny(val, " \t\n\r") {
+ l.writer.WriteByte('"')
+ l.writer.WriteString(val)
+ l.writer.WriteByte('"')
+ } else {
+ l.writer.WriteString(val)
+ }
+ }
+ }
+
+ l.writer.WriteString("\n")
+
+ if stacktrace != "" {
+ l.writer.WriteString(string(stacktrace))
+ }
+}
+
+func (l *intLogger) renderSlice(v reflect.Value) string {
+ var buf bytes.Buffer
+
+ buf.WriteRune('[')
+
+ for i := 0; i < v.Len(); i++ {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+
+ sv := v.Index(i)
+
+ var val string
+
+ switch sv.Kind() {
+ case reflect.String:
+ val = sv.String()
+ case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64:
+ val = strconv.FormatInt(sv.Int(), 10)
+ case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ val = strconv.FormatUint(sv.Uint(), 10)
+ default:
+ val = fmt.Sprintf("%v", sv.Interface())
+ }
+
+ if strings.ContainsAny(val, " \t\n\r") {
+ buf.WriteByte('"')
+ buf.WriteString(val)
+ buf.WriteByte('"')
+ } else {
+ buf.WriteString(val)
+ }
+ }
+
+ buf.WriteRune(']')
+
+ return buf.String()
+}
+
+// JSON logging function
+func (l *intLogger) logJSON(t time.Time, name string, level Level, msg string, args ...interface{}) {
+ vals := l.jsonMapEntry(t, name, level, msg)
+ args = append(l.implied, args...)
+
+ if len(args) > 0 {
+ if len(args)%2 != 0 {
+ cs, ok := args[len(args)-1].(CapturedStacktrace)
+ if ok {
+ args = args[:len(args)-1]
+ vals["stacktrace"] = cs
+ } else {
+ extra := args[len(args)-1]
+ args = append(args[:len(args)-1], MissingKey, extra)
+ }
+ }
+
+ for i := 0; i < len(args); i = i + 2 {
+ val := args[i+1]
+ switch sv := val.(type) {
+ case error:
+ // Check if val is of type error. If the error type doesn't
+ // implement json.Marshaler or encoding.TextMarshaler,
+ // set val to sv.Error() so that it gets marshaled.
+ switch sv.(type) {
+ case json.Marshaler, encoding.TextMarshaler:
+ default:
+ val = sv.Error()
+ }
+ case Format:
+ val = fmt.Sprintf(sv[0].(string), sv[1:]...)
+ }
+
+ var key string
+
+ switch st := args[i].(type) {
+ case string:
+ key = st
+ default:
+ key = fmt.Sprintf("%s", st)
+ }
+ vals[key] = val
+ }
+ }
+
+ err := json.NewEncoder(l.writer).Encode(vals)
+ if err != nil {
+ if _, ok := err.(*json.UnsupportedTypeError); ok {
+ plainVal := l.jsonMapEntry(t, name, level, msg)
+ plainVal["@warn"] = errJsonUnsupportedTypeMsg
+
+ json.NewEncoder(l.writer).Encode(plainVal)
+ }
+ }
+}
+
+func (l intLogger) jsonMapEntry(t time.Time, name string, level Level, msg string) map[string]interface{} {
+ vals := map[string]interface{}{
+ "@message": msg,
+ "@timestamp": t.Format("2006-01-02T15:04:05.000000Z07:00"),
+ }
+
+ var levelStr string
+ switch level {
+ case Error:
+ levelStr = "error"
+ case Warn:
+ levelStr = "warn"
+ case Info:
+ levelStr = "info"
+ case Debug:
+ levelStr = "debug"
+ case Trace:
+ levelStr = "trace"
+ default:
+ levelStr = "all"
+ }
+
+ vals["@level"] = levelStr
+
+ if name != "" {
+ vals["@module"] = name
+ }
+
+ if l.caller {
+ if _, file, line, ok := runtime.Caller(4); ok {
+ vals["@caller"] = fmt.Sprintf("%s:%d", file, line)
+ }
+ }
+ return vals
+}
+
+// Emit the message and args at the provided level
+func (l *intLogger) Log(level Level, msg string, args ...interface{}) {
+ l.log(l.Name(), level, msg, args...)
+}
+
+// Emit the message and args at DEBUG level
+func (l *intLogger) Debug(msg string, args ...interface{}) {
+ l.log(l.Name(), Debug, msg, args...)
+}
+
+// Emit the message and args at TRACE level
+func (l *intLogger) Trace(msg string, args ...interface{}) {
+ l.log(l.Name(), Trace, msg, args...)
+}
+
+// Emit the message and args at INFO level
+func (l *intLogger) Info(msg string, args ...interface{}) {
+ l.log(l.Name(), Info, msg, args...)
+}
+
+// Emit the message and args at WARN level
+func (l *intLogger) Warn(msg string, args ...interface{}) {
+ l.log(l.Name(), Warn, msg, args...)
+}
+
+// Emit the message and args at ERROR level
+func (l *intLogger) Error(msg string, args ...interface{}) {
+ l.log(l.Name(), Error, msg, args...)
+}
+
+// Indicate that the logger would emit TRACE level logs
+func (l *intLogger) IsTrace() bool {
+ return Level(atomic.LoadInt32(l.level)) == Trace
+}
+
+// Indicate that the logger would emit DEBUG level logs
+func (l *intLogger) IsDebug() bool {
+ return Level(atomic.LoadInt32(l.level)) <= Debug
+}
+
+// Indicate that the logger would emit INFO level logs
+func (l *intLogger) IsInfo() bool {
+ return Level(atomic.LoadInt32(l.level)) <= Info
+}
+
+// Indicate that the logger would emit WARN level logs
+func (l *intLogger) IsWarn() bool {
+ return Level(atomic.LoadInt32(l.level)) <= Warn
+}
+
+// Indicate that the logger would emit ERROR level logs
+func (l *intLogger) IsError() bool {
+ return Level(atomic.LoadInt32(l.level)) <= Error
+}
+
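+// MissingKey is the key used for an unpaired trailing value when an odd
+// number of args is passed to a logging method or With.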
+const MissingKey = "EXTRA_VALUE_AT_END"
+
+// Return a sub-Logger for which every emitted log message will contain
+// the given key/value pairs. This is used to create a context specific
+// Logger.
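+//
+// For example (a sketch):
+//
+//    reqLogger := logger.With("request_id", reqID)
+//    reqLogger.Info("handling request") // request_id=... appears on every line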
+func (l *intLogger) With(args ...interface{}) Logger {
+ var extra interface{}
+
+ if len(args)%2 != 0 {
+ extra = args[len(args)-1]
+ args = args[:len(args)-1]
+ }
+
+ sl := *l
+
+ result := make(map[string]interface{}, len(l.implied)+len(args))
+ keys := make([]string, 0, len(l.implied)+len(args))
+
+ // Read existing args, store map and key for consistent sorting
+ for i := 0; i < len(l.implied); i += 2 {
+ key := l.implied[i].(string)
+ keys = append(keys, key)
+ result[key] = l.implied[i+1]
+ }
+ // Read new args, store map and key for consistent sorting
+ for i := 0; i < len(args); i += 2 {
+ key := args[i].(string)
+ _, exists := result[key]
+ if !exists {
+ keys = append(keys, key)
+ }
+ result[key] = args[i+1]
+ }
+
+ // Sort keys to be consistent
+ sort.Strings(keys)
+
+ sl.implied = make([]interface{}, 0, len(l.implied)+len(args))
+ for _, k := range keys {
+ sl.implied = append(sl.implied, k)
+ sl.implied = append(sl.implied, result[k])
+ }
+
+ if extra != nil {
+ sl.implied = append(sl.implied, MissingKey, extra)
+ }
+
+ return &sl
+}
+
+// Create a new sub-Logger with a name descending from the current name.
+// This is used to create a subsystem specific Logger.
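+//
+// For example, logger.Named("db").Named("pool") produces the name "db.pool".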
+func (l *intLogger) Named(name string) Logger {
+ sl := *l
+
+ if sl.name != "" {
+ sl.name = sl.name + "." + name
+ } else {
+ sl.name = name
+ }
+
+ return &sl
+}
+
+// Create a new sub-Logger with an explicit name. This ignores the current
+// name. This is used to create a standalone logger that doesn't fall
+// within the normal hierarchy.
+func (l *intLogger) ResetNamed(name string) Logger {
+ sl := *l
+
+ sl.name = name
+
+ return &sl
+}
+
+func (l *intLogger) ResetOutput(opts *LoggerOptions) error {
+ if opts.Output == nil {
+ return errors.New("given output is nil")
+ }
+
+ l.mutex.Lock()
+ defer l.mutex.Unlock()
+
+ return l.resetOutput(opts)
+}
+
+func (l *intLogger) ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error {
+ if opts.Output == nil {
+ return errors.New("given output is nil")
+ }
+ if flushable == nil {
+ return errors.New("flushable is nil")
+ }
+
+ l.mutex.Lock()
+ defer l.mutex.Unlock()
+
+ if err := flushable.Flush(); err != nil {
+ return err
+ }
+
+ return l.resetOutput(opts)
+}
+
+func (l *intLogger) resetOutput(opts *LoggerOptions) error {
+ l.writer = newWriter(opts.Output, opts.Color)
+ l.setColorization(opts)
+ return nil
+}
+
+// Update the logging level on-the-fly. This will affect all subloggers as
+// well.
+func (l *intLogger) SetLevel(level Level) {
+ atomic.StoreInt32(l.level, int32(level))
+}
+
+// Create a *log.Logger that will send its data through this Logger. This
+// allows packages that expect to be using the standard library log to actually
+// use this logger.
+func (l *intLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger {
+ if opts == nil {
+ opts = &StandardLoggerOptions{}
+ }
+
+ return log.New(l.StandardWriter(opts), "", 0)
+}
+
+func (l *intLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer {
+ return &stdlogAdapter{
+ log: l,
+ inferLevels: opts.InferLevels,
+ forceLevel: opts.ForceLevel,
+ }
+}
+
+// checks if the underlying io.Writer is a file, and
+// panics if not. For use by colorization.
+func (l *intLogger) checkWriterIsFile() *os.File {
+ fi, ok := l.writer.w.(*os.File)
+ if !ok {
+ panic("Cannot enable coloring of non-file Writers")
+ }
+ return fi
+}
+
+// Accept implements the SinkAdapter interface
+func (i *intLogger) Accept(name string, level Level, msg string, args ...interface{}) {
+ i.log(name, level, msg, args...)
+}
+
+// ImpliedArgs returns the logger's implied args.
+func (i *intLogger) ImpliedArgs() []interface{} {
+ return i.implied
+}
+
+// Name returns the logger's name.
+func (i *intLogger) Name() string {
+ return i.name
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go
new file mode 100644
index 0000000..147bd2d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/logger.go
@@ -0,0 +1,262 @@
+package hclog
+
+import (
+ "io"
+ "log"
+ "os"
+ "strings"
+ "sync"
+)
+
+var (
+ // DefaultOutput is used as the default log output.
+ DefaultOutput io.Writer = os.Stderr
+
+ // DefaultLevel is used as the default log level.
+ DefaultLevel = Info
+)
+
+// Level represents a log level.
+type Level int32
+
+const (
+ // NoLevel is a special level used to indicate that no level has been
+ // set and allow for a default to be used.
+ NoLevel Level = 0
+
+ // Trace is the most verbose level. Intended to be used for the tracing
+ // of actions in code, such as function enters/exits, etc.
+ Trace Level = 1
+
+ // Debug information for programmer low-level analysis.
+ Debug Level = 2
+
+ // Info information about steady state operations.
+ Info Level = 3
+
+ // Warn information about rare but handled events.
+ Warn Level = 4
+
+ // Error information about unrecoverable events.
+ Error Level = 5
+)
+
+// Format is a simple convenience type for when formatting is required. When
+// processing a value of this type, the logger automatically treats the first
+// argument as a Printf formatting string and passes the rest as the values
+// to be formatted. For example: L.Info("quota", "beans", Fmt("%d beans/day", beans)).
+type Format []interface{}
+
+// Fmt returns a Format type. This is a convenience function for creating a Format
+// type.
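+//
+// For example (a sketch):
+//
+//    logger.Info("upload complete", "rate", Fmt("%.1f MB/s", rate))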
+func Fmt(str string, args ...interface{}) Format {
+ return append(Format{str}, args...)
+}
+
+// ColorOption expresses how the output should be colored, if at all.
+type ColorOption uint8
+
+const (
+ // ColorOff is the default coloration, and does not
+ // inject color codes into the io.Writer.
+ ColorOff ColorOption = iota
+ // AutoColor checks if the io.Writer is a tty,
+ // and if so enables coloring.
+ AutoColor
+ // ForceColor will enable coloring, regardless of whether
+ // the io.Writer is a tty or not.
+ ForceColor
+)
+
+// LevelFromString returns a Level type for the named log level, or "NoLevel" if
+// the level string is invalid. This facilitates setting the log level via
+// config or environment variable by name in a predictable way.
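+//
+// For example (a sketch):
+//
+//    level := LevelFromString(os.Getenv("LOG_LEVEL"))
+//    if level == NoLevel {
+//        level = DefaultLevel
+//    }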
+func LevelFromString(levelStr string) Level {
+ // We don't care about case. Accept both "INFO" and "info".
+ levelStr = strings.ToLower(strings.TrimSpace(levelStr))
+ switch levelStr {
+ case "trace":
+ return Trace
+ case "debug":
+ return Debug
+ case "info":
+ return Info
+ case "warn":
+ return Warn
+ case "error":
+ return Error
+ default:
+ return NoLevel
+ }
+}
+
+// Logger describes the interface that must be implemented by all loggers.
+type Logger interface {
+ // Args are alternating key, val pairs
+ // keys must be strings
+ // vals can be any type, but display is implementation specific
+ // Emit a message and key/value pairs at a provided log level
+ Log(level Level, msg string, args ...interface{})
+
+ // Emit a message and key/value pairs at the TRACE level
+ Trace(msg string, args ...interface{})
+
+ // Emit a message and key/value pairs at the DEBUG level
+ Debug(msg string, args ...interface{})
+
+ // Emit a message and key/value pairs at the INFO level
+ Info(msg string, args ...interface{})
+
+ // Emit a message and key/value pairs at the WARN level
+ Warn(msg string, args ...interface{})
+
+ // Emit a message and key/value pairs at the ERROR level
+ Error(msg string, args ...interface{})
+
+ // Indicate if TRACE logs would be emitted. This and the other Is* guards
+ // are used to elide expensive logging code based on the current level.
+ IsTrace() bool
+
+ // Indicate if DEBUG logs would be emitted.
+ IsDebug() bool
+
+ // Indicate if INFO logs would be emitted.
+ IsInfo() bool
+
+ // Indicate if WARN logs would be emitted.
+ IsWarn() bool
+
+ // Indicate if ERROR logs would be emitted.
+ IsError() bool
+
+ // ImpliedArgs returns the key/value pairs set via With.
+ ImpliedArgs() []interface{}
+
+ // Creates a sublogger that will always have the given key/value pairs
+ With(args ...interface{}) Logger
+
+ // Returns the Name of the logger
+ Name() string
+
+ // Create a logger that will prepend the name string on the front of all messages.
+ // If the logger already has a name, the new value will be appended to the current
+ // name. That way, a major subsystem can use this to decorate all its own logs
+ // without losing context.
+ Named(name string) Logger
+
+ // Create a logger that will prepend the name string on the front of all messages.
+ // This sets the name of the logger to the value directly, unlike Named which honors
+ // the current name as well.
+ ResetNamed(name string) Logger
+
+ // Updates the level. This should affect all sub-loggers as well. If an
+ // implementation cannot update the level on the fly, it should no-op.
+ SetLevel(level Level)
+
+ // Return a value that conforms to the stdlib log.Logger interface
+ StandardLogger(opts *StandardLoggerOptions) *log.Logger
+
+ // Return a value that conforms to io.Writer, which can be passed into log.SetOutput()
+ StandardWriter(opts *StandardLoggerOptions) io.Writer
+}
+
+// StandardLoggerOptions can be used to configure a new standard logger.
+type StandardLoggerOptions struct {
+ // Indicate that some minimal parsing should be done on strings to try
+ // to detect their level and re-emit them.
+ // This supports prefixes like [ERROR], [ERR], [TRACE], [WARN], [INFO],
+ // and [DEBUG], which are stripped off before the line is re-emitted at
+ // the detected level.
+ InferLevels bool
+
+ // ForceLevel is used to force all output from the standard logger to be at
+ // the specified level. Similar to InferLevels, this will strip any level
+ // prefix contained in the logged string before applying the forced level.
+ // If set, this overrides InferLevels.
+ ForceLevel Level
+}
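+
+// A minimal sketch of routing stdlib log output through an hclog Logger with
+// level inference (logger is illustrative):
+//
+//    std := logger.StandardLogger(&StandardLoggerOptions{InferLevels: true})
+//    std.Println("[ERROR] upstream timeout") // emitted at ERROR, prefix stripped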
+
+// LoggerOptions can be used to configure a new logger.
+type LoggerOptions struct {
+ // Name of the subsystem to prefix logs with
+ Name string
+
+ // The threshold for the logger. Anything less severe is suppressed.
+ Level Level
+
+ // Where to write the logs to. Defaults to os.Stderr if nil
+ Output io.Writer
+
+ // An optional mutex pointer in case Output is shared
+ Mutex *sync.Mutex
+
+ // Control if the output should be in JSON.
+ JSONFormat bool
+
+ // Include file and line information in each log line
+ IncludeLocation bool
+
+ // The time format to use instead of the default
+ TimeFormat string
+
+ // Color the output. On Windows, colored logs are only available for io.Writers that
+ // are concretely instances of *os.File.
+ Color ColorOption
+}
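+
+// A hypothetical configuration sketch combining several of the options above:
+//
+//    logger := New(&LoggerOptions{
+//        Name:       "api",
+//        Level:      LevelFromString(os.Getenv("LOG_LEVEL")),
+//        JSONFormat: true,
+//        TimeFormat: time.RFC3339,
+//    })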
+
+// InterceptLogger describes the interface for using a logger
+// that can register different output sinks.
+// This is useful for sending lower level log messages
+// to a different output while keeping the root logger
+// at a higher one.
+type InterceptLogger interface {
+ // Logger is the root logger for an InterceptLogger
+ Logger
+
+ // RegisterSink adds a SinkAdapter to the InterceptLogger
+ RegisterSink(sink SinkAdapter)
+
+ // DeregisterSink removes a SinkAdapter from the InterceptLogger
+ DeregisterSink(sink SinkAdapter)
+
+ // Create an InterceptLogger that will prepend the name string on the front of all messages.
+ // If the logger already has a name, the new value will be appended to the current
+ // name. That way, a major subsystem can use this to decorate all its own logs
+ // without losing context.
+ NamedIntercept(name string) InterceptLogger
+
+ // Create an InterceptLogger that will prepend the name string on the front of all messages.
+ // This sets the name of the logger to the value directly, unlike Named which honors
+ // the current name as well.
+ ResetNamedIntercept(name string) InterceptLogger
+
+ // Return a value that conforms to the stdlib log.Logger interface
+ StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger
+
+ // Return a value that conforms to io.Writer, which can be passed into log.SetOutput()
+ StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer
+}
+
+// SinkAdapter describes the interface that must be implemented
+// in order to Register a new sink to an InterceptLogger
+type SinkAdapter interface {
+ Accept(name string, level Level, msg string, args ...interface{})
+}
+
+// Flushable represents a method for flushing an output buffer. It can be
+// used when resetting the logger to use a new output, in order to flush the
+// writes to the existing output beforehand.
+type Flushable interface {
+ Flush() error
+}
+
+// OutputResettable provides ways to swap the output in use at runtime
+type OutputResettable interface {
+ // ResetOutput swaps the current output writer with the one given in the
+ // opts. Color options given in opts will be used for the new output.
+ ResetOutput(opts *LoggerOptions) error
+
+ // ResetOutputWithFlush swaps the current output writer with the one given
+ // in the opts, first calling Flush on the given Flushable. Color options
+ // given in opts will be used for the new output.
+ ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/nulllogger.go b/vendor/github.com/hashicorp/go-hclog/nulllogger.go
new file mode 100644
index 0000000..bc14f77
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/nulllogger.go
@@ -0,0 +1,58 @@
+package hclog
+
+import (
+ "io"
+ "io/ioutil"
+ "log"
+)
+
+// NewNullLogger instantiates a Logger for which all calls
+// will succeed without doing anything.
+// Useful for testing purposes.
+func NewNullLogger() Logger {
+ return &nullLogger{}
+}
+
+type nullLogger struct{}
+
+func (l *nullLogger) Log(level Level, msg string, args ...interface{}) {}
+
+func (l *nullLogger) Trace(msg string, args ...interface{}) {}
+
+func (l *nullLogger) Debug(msg string, args ...interface{}) {}
+
+func (l *nullLogger) Info(msg string, args ...interface{}) {}
+
+func (l *nullLogger) Warn(msg string, args ...interface{}) {}
+
+func (l *nullLogger) Error(msg string, args ...interface{}) {}
+
+func (l *nullLogger) IsTrace() bool { return false }
+
+func (l *nullLogger) IsDebug() bool { return false }
+
+func (l *nullLogger) IsInfo() bool { return false }
+
+func (l *nullLogger) IsWarn() bool { return false }
+
+func (l *nullLogger) IsError() bool { return false }
+
+func (l *nullLogger) ImpliedArgs() []interface{} { return []interface{}{} }
+
+func (l *nullLogger) With(args ...interface{}) Logger { return l }
+
+func (l *nullLogger) Name() string { return "" }
+
+func (l *nullLogger) Named(name string) Logger { return l }
+
+func (l *nullLogger) ResetNamed(name string) Logger { return l }
+
+func (l *nullLogger) SetLevel(level Level) {}
+
+func (l *nullLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger {
+ return log.New(l.StandardWriter(opts), "", log.LstdFlags)
+}
+
+func (l *nullLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer {
+ return ioutil.Discard
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/stacktrace.go b/vendor/github.com/hashicorp/go-hclog/stacktrace.go
new file mode 100644
index 0000000..9b27bd3
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/stacktrace.go
@@ -0,0 +1,109 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package hclog
+
+import (
+ "bytes"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+var (
+ _stacktraceIgnorePrefixes = []string{
+ "runtime.goexit",
+ "runtime.main",
+ }
+ _stacktracePool = sync.Pool{
+ New: func() interface{} {
+ return newProgramCounters(64)
+ },
+ }
+)
+
+// CapturedStacktrace represents a stacktrace captured by a previous call
+// to Stacktrace. If passed to a logging function, the stacktrace
+// will be appended.
+type CapturedStacktrace string
+
+// Stacktrace captures a stacktrace of the current goroutine and returns
+// it to be passed to a logging function.
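+//
+// For example (a sketch):
+//
+//    logger.Error("request failed", "error", err, "stacktrace", Stacktrace())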
+func Stacktrace() CapturedStacktrace {
+ return CapturedStacktrace(takeStacktrace())
+}
+
+func takeStacktrace() string {
+ programCounters := _stacktracePool.Get().(*programCounters)
+ defer _stacktracePool.Put(programCounters)
+
+ var buffer bytes.Buffer
+
+ for {
+ // Skip the call to runtime.Callers and takeStacktrace so that the
+ // program counters start at the caller of takeStacktrace.
+ n := runtime.Callers(2, programCounters.pcs)
+ if n < cap(programCounters.pcs) {
+ programCounters.pcs = programCounters.pcs[:n]
+ break
+ }
+ // Don't put the too-short counter slice back into the pool; this lets
+ // the pool adjust if we consistently take deep stacktraces.
+ programCounters = newProgramCounters(len(programCounters.pcs) * 2)
+ }
+
+ i := 0
+ frames := runtime.CallersFrames(programCounters.pcs)
+ for frame, more := frames.Next(); more; frame, more = frames.Next() {
+ if shouldIgnoreStacktraceFunction(frame.Function) {
+ continue
+ }
+ if i != 0 {
+ buffer.WriteByte('\n')
+ }
+ i++
+ buffer.WriteString(frame.Function)
+ buffer.WriteByte('\n')
+ buffer.WriteByte('\t')
+ buffer.WriteString(frame.File)
+ buffer.WriteByte(':')
+ buffer.WriteString(strconv.Itoa(int(frame.Line)))
+ }
+
+ return buffer.String()
+}
+
+func shouldIgnoreStacktraceFunction(function string) bool {
+ for _, prefix := range _stacktraceIgnorePrefixes {
+ if strings.HasPrefix(function, prefix) {
+ return true
+ }
+ }
+ return false
+}
+
+type programCounters struct {
+ pcs []uintptr
+}
+
+func newProgramCounters(size int) *programCounters {
+ return &programCounters{make([]uintptr, size)}
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/stdlog.go b/vendor/github.com/hashicorp/go-hclog/stdlog.go
new file mode 100644
index 0000000..2cf0456
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/stdlog.go
@@ -0,0 +1,74 @@
+package hclog
+
+import (
+ "bytes"
+ "strings"
+)
+
+// Provides an io.Writer to shim the data out of *log.Logger
+// and back into our Logger. This is basically the only way to
+// build upon *log.Logger.
+type stdlogAdapter struct {
+ log Logger
+ inferLevels bool
+ forceLevel Level
+}
+
+// Take the data, infer the levels if configured, and send it through
+// a regular Logger.
+func (s *stdlogAdapter) Write(data []byte) (int, error) {
+ str := string(bytes.TrimRight(data, " \t\n"))
+
+ if s.forceLevel != NoLevel {
+ // Use pickLevel to strip log levels included in the line since we are
+ // forcing the level
+ _, str := s.pickLevel(str)
+
+ // Log at the forced level
+ s.dispatch(str, s.forceLevel)
+ } else if s.inferLevels {
+ level, str := s.pickLevel(str)
+ s.dispatch(str, level)
+ } else {
+ s.log.Info(str)
+ }
+
+ return len(data), nil
+}
+
+func (s *stdlogAdapter) dispatch(str string, level Level) {
+ switch level {
+ case Trace:
+ s.log.Trace(str)
+ case Debug:
+ s.log.Debug(str)
+ case Info:
+ s.log.Info(str)
+ case Warn:
+ s.log.Warn(str)
+ case Error:
+ s.log.Error(str)
+ default:
+ s.log.Info(str)
+ }
+}
+
+// Detect, based on conventions, what log level this is.
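+// For example, "[WARN] low disk" yields (Warn, "low disk").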
+func (s *stdlogAdapter) pickLevel(str string) (Level, string) {
+ switch {
+ case strings.HasPrefix(str, "[DEBUG]"):
+ return Debug, strings.TrimSpace(str[7:])
+ case strings.HasPrefix(str, "[TRACE]"):
+ return Trace, strings.TrimSpace(str[7:])
+ case strings.HasPrefix(str, "[INFO]"):
+ return Info, strings.TrimSpace(str[6:])
+ case strings.HasPrefix(str, "[WARN]"):
+ return Warn, strings.TrimSpace(str[7:])
+ case strings.HasPrefix(str, "[ERROR]"):
+ return Error, strings.TrimSpace(str[7:])
+ case strings.HasPrefix(str, "[ERR]"):
+ return Error, strings.TrimSpace(str[5:])
+ default:
+ return Info, str
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/writer.go b/vendor/github.com/hashicorp/go-hclog/writer.go
new file mode 100644
index 0000000..421a1f0
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/writer.go
@@ -0,0 +1,82 @@
+package hclog
+
+import (
+ "bytes"
+ "io"
+)
+
+type writer struct {
+ b bytes.Buffer
+ w io.Writer
+ color ColorOption
+}
+
+func newWriter(w io.Writer, color ColorOption) *writer {
+ return &writer{w: w, color: color}
+}
+
+func (w *writer) Flush(level Level) (err error) {
+ var unwritten = w.b.Bytes()
+
+ if w.color != ColorOff {
+ color := _levelToColor[level]
+ unwritten = []byte(color.Sprintf("%s", unwritten))
+ }
+
+ if lw, ok := w.w.(LevelWriter); ok {
+ _, err = lw.LevelWrite(level, unwritten)
+ } else {
+ _, err = w.w.Write(unwritten)
+ }
+ w.b.Reset()
+ return err
+}
+
+func (w *writer) Write(p []byte) (int, error) {
+ return w.b.Write(p)
+}
+
+func (w *writer) WriteByte(c byte) error {
+ return w.b.WriteByte(c)
+}
+
+func (w *writer) WriteString(s string) (int, error) {
+ return w.b.WriteString(s)
+}
+
+// LevelWriter is the interface that wraps the LevelWrite method.
+type LevelWriter interface {
+ LevelWrite(level Level, p []byte) (n int, err error)
+}
+
+// LeveledWriter writes all log messages to the standard writer,
+// except for log levels that are defined in the overrides map.
+type LeveledWriter struct {
+ standard io.Writer
+ overrides map[Level]io.Writer
+}
+
+// NewLeveledWriter returns an initialized LeveledWriter.
+//
+// standard will be used as the default writer for all log levels,
+// except for log levels that are defined in the overrides map.
+func NewLeveledWriter(standard io.Writer, overrides map[Level]io.Writer) *LeveledWriter {
+ return &LeveledWriter{
+ standard: standard,
+ overrides: overrides,
+ }
+}
+
+// Write implements io.Writer.
+func (lw *LeveledWriter) Write(p []byte) (int, error) {
+ return lw.standard.Write(p)
+}
+
+// LevelWrite implements LevelWriter.
+func (lw *LeveledWriter) LevelWrite(level Level, p []byte) (int, error) {
+ w, ok := lw.overrides[level]
+ if !ok {
+ w = lw.standard
+ }
+ return w.Write(p)
+}
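+
+// A minimal sketch of splitting output by level (writers are illustrative):
+//
+//    w := NewLeveledWriter(os.Stdout, map[Level]io.Writer{
+//        Error: os.Stderr,
+//    })
+//    logger := New(&LoggerOptions{Output: w})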
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/.gitignore b/vendor/github.com/hashicorp/go-immutable-radix/.gitignore
new file mode 100644
index 0000000..daf913b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml b/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml
new file mode 100644
index 0000000..1a0bbea
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml
@@ -0,0 +1,3 @@
+language: go
+go:
+ - tip
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md
new file mode 100644
index 0000000..dd7c0ef
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md
@@ -0,0 +1,9 @@
+# 1.1.0 (May 22nd, 2019)
+
+FEATURES
+
+* Add `SeekLowerBound` to allow for range scans. [[GH-24](https://github.com/hashicorp/go-immutable-radix/pull/24)]
+
+# 1.0.0 (August 30th, 2018)
+
+* go mod adopted
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/LICENSE b/vendor/github.com/hashicorp/go-immutable-radix/LICENSE
new file mode 100644
index 0000000..e87a115
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/LICENSE
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/README.md b/vendor/github.com/hashicorp/go-immutable-radix/README.md
new file mode 100644
index 0000000..4b6338b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/README.md
@@ -0,0 +1,66 @@
+go-immutable-radix [![Build Status](https://travis-ci.org/hashicorp/go-immutable-radix.png)](https://travis-ci.org/hashicorp/go-immutable-radix)
+=========
+
+Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree).
+The package only provides a single `Tree` implementation, optimized for sparse nodes.
+
+As a radix tree, it provides the following:
+ * O(k) operations. In many cases, this can be faster than a hash table since
+ the hash function is an O(k) operation, and hash tables have very poor cache locality.
+ * Minimum / Maximum value lookups
+ * Ordered iteration
+
+A tree supports using a transaction to batch multiple updates (insert, delete)
+in a more efficient manner than performing each operation one at a time; see
+the transaction example below.
+
+For a mutable variant, see [go-radix](https://github.com/armon/go-radix).
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-immutable-radix).
+
+Example
+=======
+
+Below is a simple example of usage:
+
+```go
+// Create a tree
+r := iradix.New()
+r, _, _ = r.Insert([]byte("foo"), 1)
+r, _, _ = r.Insert([]byte("bar"), 2)
+r, _, _ = r.Insert([]byte("foobar"), 2)
+
+// Find the longest prefix match
+m, _, _ := r.Root().LongestPrefix([]byte("foozip"))
+if string(m) != "foo" {
+ panic("should be foo")
+}
+```
+
+Here is an example of performing a range scan of the keys.
+
+```go
+// Create a tree
+r := iradix.New()
+r, _, _ = r.Insert([]byte("001"), 1)
+r, _, _ = r.Insert([]byte("002"), 2)
+r, _, _ = r.Insert([]byte("005"), 5)
+r, _, _ = r.Insert([]byte("010"), 10)
+r, _, _ = r.Insert([]byte("100"), 10)
+
+// Range scan over the keys that sort lexicographically between [003, 050)
+it := r.Root().Iterator()
+it.SeekLowerBound([]byte("003"))
+for key, _, ok := it.Next(); ok; key, _, ok = it.Next() {
+ if string(key) >= "050" {
+ break
+ }
+ fmt.Println(string(key))
+}
+// Output:
+// 005
+// 010
+```
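+
+The transaction mentioned above batches several updates and commits them as a
+single new tree. A minimal sketch:
+
+```go
+// Batch updates in one transaction, then commit to obtain the new tree.
+r := iradix.New()
+txn := r.Txn()
+txn.Insert([]byte("foo"), 1)
+txn.Insert([]byte("bar"), 2)
+txn.Delete([]byte("foo"))
+r = txn.Commit()
+```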
+
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/edges.go b/vendor/github.com/hashicorp/go-immutable-radix/edges.go
new file mode 100644
index 0000000..a636747
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/edges.go
@@ -0,0 +1,21 @@
+package iradix
+
+import "sort"
+
+type edges []edge
+
+func (e edges) Len() int {
+ return len(e)
+}
+
+func (e edges) Less(i, j int) bool {
+ return e[i].label < e[j].label
+}
+
+func (e edges) Swap(i, j int) {
+ e[i], e[j] = e[j], e[i]
+}
+
+func (e edges) Sort() {
+ sort.Sort(e)
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/go.mod b/vendor/github.com/hashicorp/go-immutable-radix/go.mod
new file mode 100644
index 0000000..27e7b7c
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/go.mod
@@ -0,0 +1,6 @@
+module github.com/hashicorp/go-immutable-radix
+
+require (
+ github.com/hashicorp/go-uuid v1.0.0
+ github.com/hashicorp/golang-lru v0.5.0
+)
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/go.sum b/vendor/github.com/hashicorp/go-immutable-radix/go.sum
new file mode 100644
index 0000000..7de5dfc
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/go.sum
@@ -0,0 +1,4 @@
+github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iradix.go b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go
new file mode 100644
index 0000000..e5e6e57
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go
@@ -0,0 +1,662 @@
+package iradix
+
+import (
+ "bytes"
+ "strings"
+
+ "github.com/hashicorp/golang-lru/simplelru"
+)
+
+const (
+ // defaultModifiedCache is the default size of the modified node
+ // cache used per transaction. This is used to cache the updates
+ // to the nodes near the root, while the leaves do not need to be
+ // cached. This is important for very large transactions to prevent
+ // the modified cache from growing to be enormous. This is also used
+ // to set the max size of the mutation notify maps since those should
+ // also be bounded in a similar way.
+ defaultModifiedCache = 8192
+)
+
+// Tree implements an immutable radix tree. This can be treated as a
+// Dictionary abstract data type. The main advantages over a standard
+// hash map are prefix-based lookups and ordered iteration. The immutability
+// means that it is safe to concurrently read from a Tree without any
+// coordination.
+type Tree struct {
+ root *Node
+ size int
+}
+
+// New returns an empty Tree
+func New() *Tree {
+ t := &Tree{
+ root: &Node{
+ mutateCh: make(chan struct{}),
+ },
+ }
+ return t
+}
+
+// Len is used to return the number of elements in the tree
+func (t *Tree) Len() int {
+ return t.size
+}
+
+// Txn is a transaction on the tree. This transaction is applied
+// atomically and returns a new tree when committed. A transaction
+// is not thread safe, and should only be used by a single goroutine.
+type Txn struct {
+ // root is the modified root for the transaction.
+ root *Node
+
+ // snap is a snapshot of the root node for use if we have to run the
+ // slow notify algorithm.
+ snap *Node
+
+ // size tracks the size of the tree as it is modified during the
+ // transaction.
+ size int
+
+ // writable is a cache of writable nodes that have been created during
+ // the course of the transaction. This allows us to re-use the same
+ // nodes for further writes and avoid unnecessary copies of nodes that
+ // have never been exposed outside the transaction. This will only hold
+ // up to defaultModifiedCache number of entries.
+ writable *simplelru.LRU
+
+ // trackChannels is used to hold channels that need to be notified to
+ // signal mutation of the tree. This will only hold up to
+ // defaultModifiedCache number of entries, after which we will set the
+ // trackOverflow flag, which will cause us to use a more expensive
+ // algorithm to perform the notifications. Mutation tracking is only
+ // performed if trackMutate is true.
+ trackChannels map[chan struct{}]struct{}
+ trackOverflow bool
+ trackMutate bool
+}
+
+// Txn starts a new transaction that can be used to mutate the tree
+func (t *Tree) Txn() *Txn {
+ txn := &Txn{
+ root: t.root,
+ snap: t.root,
+ size: t.size,
+ }
+ return txn
+}
+
+// TrackMutate can be used to toggle if mutations are tracked. If this is enabled
+// then notifications will be issued for affected internal nodes and leaves when
+// the transaction is committed.
+func (t *Txn) TrackMutate(track bool) {
+ t.trackMutate = track
+}
+
+// trackChannel safely attempts to track the given mutation channel, setting the
+// overflow flag if we can no longer track any more. This limits the amount of
+// state that will accumulate during a transaction and we have a slower algorithm
+// to switch to if we overflow.
+func (t *Txn) trackChannel(ch chan struct{}) {
+ // In overflow, make sure we don't store any more objects.
+ if t.trackOverflow {
+ return
+ }
+
+ // If this would overflow the state we reject it and set the flag (since
+ // we aren't tracking everything that's required any longer).
+ if len(t.trackChannels) >= defaultModifiedCache {
+ // Mark that we are in the overflow state
+ t.trackOverflow = true
+
+ // Clear the map so that the channels can be garbage collected. It is
+ // safe to do this since we have already overflowed and will be using
+ // the slow notify algorithm.
+ t.trackChannels = nil
+ return
+ }
+
+ // Create the map on the fly when we need it.
+ if t.trackChannels == nil {
+ t.trackChannels = make(map[chan struct{}]struct{})
+ }
+
+ // Otherwise we are good to track it.
+ t.trackChannels[ch] = struct{}{}
+}
+
+// writeNode returns a node to be modified, if the current node has already been
+// modified during the course of the transaction, it is used in-place. Set
+// forLeafUpdate to true if you are getting a write node to update the leaf,
+// which will set leaf mutation tracking appropriately as well.
+func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node {
+ // Ensure the writable set exists.
+ if t.writable == nil {
+ lru, err := simplelru.NewLRU(defaultModifiedCache, nil)
+ if err != nil {
+ panic(err)
+ }
+ t.writable = lru
+ }
+
+ // If this node has already been modified, we can continue to use it
+ // during this transaction. We know that we don't need to track it for
+ // a node update since the node is writable, but if this is for a leaf
+ // update we track it, in case the initial write to this node didn't
+ // update the leaf.
+ if _, ok := t.writable.Get(n); ok {
+ if t.trackMutate && forLeafUpdate && n.leaf != nil {
+ t.trackChannel(n.leaf.mutateCh)
+ }
+ return n
+ }
+
+ // Mark this node as being mutated.
+ if t.trackMutate {
+ t.trackChannel(n.mutateCh)
+ }
+
+ // Mark its leaf as being mutated, if appropriate.
+ if t.trackMutate && forLeafUpdate && n.leaf != nil {
+ t.trackChannel(n.leaf.mutateCh)
+ }
+
+ // Copy the existing node. If you have set forLeafUpdate it will be
+ // safe to replace this leaf with another after you get your node for
+ // writing. You MUST replace it, because the channel associated with
+ // this leaf will be closed when this transaction is committed.
+ nc := &Node{
+ mutateCh: make(chan struct{}),
+ leaf: n.leaf,
+ }
+ if n.prefix != nil {
+ nc.prefix = make([]byte, len(n.prefix))
+ copy(nc.prefix, n.prefix)
+ }
+ if len(n.edges) != 0 {
+ nc.edges = make([]edge, len(n.edges))
+ copy(nc.edges, n.edges)
+ }
+
+ // Mark this node as writable.
+ t.writable.Add(nc, nil)
+ return nc
+}
+
+// Visit all the nodes in the tree under n, and add their mutateChannels to the transaction
+// Returns the size of the subtree visited
+func (t *Txn) trackChannelsAndCount(n *Node) int {
+ // Count only leaf nodes
+ leaves := 0
+ if n.leaf != nil {
+ leaves = 1
+ }
+ // Mark this node as being mutated.
+ if t.trackMutate {
+ t.trackChannel(n.mutateCh)
+ }
+
+ // Mark its leaf as being mutated, if appropriate.
+ if t.trackMutate && n.leaf != nil {
+ t.trackChannel(n.leaf.mutateCh)
+ }
+
+ // Recurse on the children
+ for _, e := range n.edges {
+ leaves += t.trackChannelsAndCount(e.node)
+ }
+ return leaves
+}
+
+// mergeChild is called to collapse the given node with its child. This is only
+// called when the given node is not a leaf and has a single edge.
+func (t *Txn) mergeChild(n *Node) {
+ // Mark the child node as being mutated since we are about to abandon
+ // it. We don't need to mark the leaf since we are retaining it if it
+ // is there.
+ e := n.edges[0]
+ child := e.node
+ if t.trackMutate {
+ t.trackChannel(child.mutateCh)
+ }
+
+ // Merge the nodes.
+ n.prefix = concat(n.prefix, child.prefix)
+ n.leaf = child.leaf
+ if len(child.edges) != 0 {
+ n.edges = make([]edge, len(child.edges))
+ copy(n.edges, child.edges)
+ } else {
+ n.edges = nil
+ }
+}
+
+// insert does a recursive insertion
+func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) {
+ // Handle key exhaustion
+ if len(search) == 0 {
+ var oldVal interface{}
+ didUpdate := false
+ if n.isLeaf() {
+ oldVal = n.leaf.val
+ didUpdate = true
+ }
+
+ nc := t.writeNode(n, true)
+ nc.leaf = &leafNode{
+ mutateCh: make(chan struct{}),
+ key: k,
+ val: v,
+ }
+ return nc, oldVal, didUpdate
+ }
+
+ // Look for the edge
+ idx, child := n.getEdge(search[0])
+
+ // No edge, create one
+ if child == nil {
+ e := edge{
+ label: search[0],
+ node: &Node{
+ mutateCh: make(chan struct{}),
+ leaf: &leafNode{
+ mutateCh: make(chan struct{}),
+ key: k,
+ val: v,
+ },
+ prefix: search,
+ },
+ }
+ nc := t.writeNode(n, false)
+ nc.addEdge(e)
+ return nc, nil, false
+ }
+
+ // Determine longest prefix of the search key on match
+ commonPrefix := longestPrefix(search, child.prefix)
+ if commonPrefix == len(child.prefix) {
+ search = search[commonPrefix:]
+ newChild, oldVal, didUpdate := t.insert(child, k, search, v)
+ if newChild != nil {
+ nc := t.writeNode(n, false)
+ nc.edges[idx].node = newChild
+ return nc, oldVal, didUpdate
+ }
+ return nil, oldVal, didUpdate
+ }
+
+ // Split the node
+ nc := t.writeNode(n, false)
+ splitNode := &Node{
+ mutateCh: make(chan struct{}),
+ prefix: search[:commonPrefix],
+ }
+ nc.replaceEdge(edge{
+ label: search[0],
+ node: splitNode,
+ })
+
+ // Restore the existing child node
+ modChild := t.writeNode(child, false)
+ splitNode.addEdge(edge{
+ label: modChild.prefix[commonPrefix],
+ node: modChild,
+ })
+ modChild.prefix = modChild.prefix[commonPrefix:]
+
+ // Create a new leaf node
+ leaf := &leafNode{
+ mutateCh: make(chan struct{}),
+ key: k,
+ val: v,
+ }
+
+ // If the new key is a subset, add it to this node
+ search = search[commonPrefix:]
+ if len(search) == 0 {
+ splitNode.leaf = leaf
+ return nc, nil, false
+ }
+
+ // Create a new edge for the node
+ splitNode.addEdge(edge{
+ label: search[0],
+ node: &Node{
+ mutateCh: make(chan struct{}),
+ leaf: leaf,
+ prefix: search,
+ },
+ })
+ return nc, nil, false
+}
+
+// delete does a recursive deletion
+func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
+ // Check for key exhaustion
+ if len(search) == 0 {
+ if !n.isLeaf() {
+ return nil, nil
+ }
+ // Copy the pointer in case we are in a transaction that already
+ // modified this node since the node will be reused. Any changes
+ // made to the node will not affect returning the original leaf
+ // value.
+ oldLeaf := n.leaf
+
+ // Remove the leaf node
+ nc := t.writeNode(n, true)
+ nc.leaf = nil
+
+ // Check if this node should be merged
+ if n != t.root && len(nc.edges) == 1 {
+ t.mergeChild(nc)
+ }
+ return nc, oldLeaf
+ }
+
+ // Look for an edge
+ label := search[0]
+ idx, child := n.getEdge(label)
+ if child == nil || !bytes.HasPrefix(search, child.prefix) {
+ return nil, nil
+ }
+
+ // Consume the search prefix
+ search = search[len(child.prefix):]
+ newChild, leaf := t.delete(n, child, search)
+ if newChild == nil {
+ return nil, nil
+ }
+
+ // Copy this node. WATCH OUT - it's safe to pass "false" here because we
+ // will only ADD a leaf via t.mergeChild(nc) if there isn't one due to
+ // the !nc.isLeaf() check in the logic just below. This is pretty subtle,
+ // so be careful if you change any of the logic here.
+ nc := t.writeNode(n, false)
+
+ // Delete the edge if the node has no edges
+ if newChild.leaf == nil && len(newChild.edges) == 0 {
+ nc.delEdge(label)
+ if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
+ t.mergeChild(nc)
+ }
+ } else {
+ nc.edges[idx].node = newChild
+ }
+ return nc, leaf
+}
+
+// deletePrefix does a recursive deletion of all entries under the given prefix
+func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) {
+ // Check for key exhaustion
+ if len(search) == 0 {
+ nc := t.writeNode(n, true)
+ if n.isLeaf() {
+ nc.leaf = nil
+ }
+ nc.edges = nil
+ return nc, t.trackChannelsAndCount(n)
+ }
+
+ // Look for an edge
+ label := search[0]
+ idx, child := n.getEdge(label)
+ // We make sure that either the child node's prefix starts with the search term, or the search term starts with the child node's prefix
+ // Need to do both so that we can delete prefixes that don't correspond to any node in the tree
+ if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) {
+ return nil, 0
+ }
+
+ // Consume the search prefix
+ if len(child.prefix) > len(search) {
+ search = []byte("")
+ } else {
+ search = search[len(child.prefix):]
+ }
+ newChild, numDeletions := t.deletePrefix(n, child, search)
+ if newChild == nil {
+ return nil, 0
+ }
+ // Copy this node. WATCH OUT - it's safe to pass "false" here because we
+ // will only ADD a leaf via t.mergeChild(nc) if there isn't one due to
+ // the !nc.isLeaf() check in the logic just below. This is pretty subtle,
+ // so be careful if you change any of the logic here.
+
+ nc := t.writeNode(n, false)
+
+ // Delete the edge if the node has no edges
+ if newChild.leaf == nil && len(newChild.edges) == 0 {
+ nc.delEdge(label)
+ if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
+ t.mergeChild(nc)
+ }
+ } else {
+ nc.edges[idx].node = newChild
+ }
+ return nc, numDeletions
+}
+
+// Insert is used to add or update a given key. The return provides
+// the previous value and a bool indicating if any was set.
+func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) {
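+ // k is passed twice: once as the key stored in the new leaf, and once
+ // as the search slice that insert consumes while descending the tree.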
+ newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v)
+ if newRoot != nil {
+ t.root = newRoot
+ }
+ if !didUpdate {
+ t.size++
+ }
+ return oldVal, didUpdate
+}
+
+// Delete is used to delete a given key. Returns the old value if any,
+// and a bool indicating if the key was set.
+func (t *Txn) Delete(k []byte) (interface{}, bool) {
+ newRoot, leaf := t.delete(nil, t.root, k)
+ if newRoot != nil {
+ t.root = newRoot
+ }
+ if leaf != nil {
+ t.size--
+ return leaf.val, true
+ }
+ return nil, false
+}
+
+// DeletePrefix is used to delete an entire subtree that matches the prefix
+// This will delete all nodes under that prefix
+func (t *Txn) DeletePrefix(prefix []byte) bool {
+ newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix)
+ if newRoot != nil {
+ t.root = newRoot
+ t.size = t.size - numDeletions
+ return true
+ }
+ return false
+
+}
+
+// Root returns the current root of the radix tree within this
+// transaction. The root is not safe across insert and delete operations,
+// but can be used to read the current state during a transaction.
+func (t *Txn) Root() *Node {
+ return t.root
+}
+
+// Get is used to lookup a specific key, returning
+// the value and if it was found
+func (t *Txn) Get(k []byte) (interface{}, bool) {
+ return t.root.Get(k)
+}
+
+// GetWatch is used to lookup a specific key, returning
+// the watch channel, value and if it was found
+func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
+ return t.root.GetWatch(k)
+}
+
+// Commit is used to finalize the transaction and return a new tree. If mutation
+// tracking is turned on then notifications will also be issued.
+func (t *Txn) Commit() *Tree {
+ nt := t.CommitOnly()
+ if t.trackMutate {
+ t.Notify()
+ }
+ return nt
+}
+
+// CommitOnly is used to finalize the transaction and return a new tree, but
+// does not issue any notifications until Notify is called.
+func (t *Txn) CommitOnly() *Tree {
+ nt := &Tree{t.root, t.size}
+ t.writable = nil
+ return nt
+}
+
+// slowNotify does a complete comparison of the before and after trees in order
+// to trigger notifications. This doesn't require any additional state but it
+// is very expensive to compute.
+func (t *Txn) slowNotify() {
+ snapIter := t.snap.rawIterator()
+ rootIter := t.root.rawIterator()
+ for snapIter.Front() != nil || rootIter.Front() != nil {
+ // If we've exhausted the nodes in the old snapshot, we know
+ // there's nothing remaining to notify.
+ if snapIter.Front() == nil {
+ return
+ }
+ snapElem := snapIter.Front()
+
+ // If we've exhausted the nodes in the new root, we know we need
+ // to invalidate everything that remains in the old snapshot. We
+ // know from the loop condition there's something in the old
+ // snapshot.
+ if rootIter.Front() == nil {
+ close(snapElem.mutateCh)
+ if snapElem.isLeaf() {
+ close(snapElem.leaf.mutateCh)
+ }
+ snapIter.Next()
+ continue
+ }
+
+ // Do one string compare so we can check the various conditions
+ // below without repeating the compare.
+ cmp := strings.Compare(snapIter.Path(), rootIter.Path())
+
+ // If the snapshot is behind the root, then we must have deleted
+ // this node during the transaction.
+ if cmp < 0 {
+ close(snapElem.mutateCh)
+ if snapElem.isLeaf() {
+ close(snapElem.leaf.mutateCh)
+ }
+ snapIter.Next()
+ continue
+ }
+
+ // If the snapshot is ahead of the root, then we must have added
+ // this node during the transaction.
+ if cmp > 0 {
+ rootIter.Next()
+ continue
+ }
+
+ // If we have the same path, then we need to see if we mutated a
+ // node and possibly the leaf.
+ rootElem := rootIter.Front()
+ if snapElem != rootElem {
+ close(snapElem.mutateCh)
+ if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) {
+ close(snapElem.leaf.mutateCh)
+ }
+ }
+ snapIter.Next()
+ rootIter.Next()
+ }
+}
+
+// Notify is used along with TrackMutate to trigger notifications. This must
+// only be done once a transaction is committed via CommitOnly, and it is called
+// automatically by Commit.
+func (t *Txn) Notify() {
+ if !t.trackMutate {
+ return
+ }
+
+ // If we've overflowed the tracking state we can't use it in any way and
+ // need to do a full tree compare.
+ if t.trackOverflow {
+ t.slowNotify()
+ } else {
+ for ch := range t.trackChannels {
+ close(ch)
+ }
+ }
+
+ // Clean up the tracking state so that a re-notify is safe (will trigger
+ // the else clause above which will be a no-op).
+ t.trackChannels = nil
+ t.trackOverflow = false
+}
+
+// Insert is used to add or update a given key. The return provides
+// the new tree, previous value and a bool indicating if any was set.
+func (t *Tree) Insert(k []byte, v interface{}) (*Tree, interface{}, bool) {
+ txn := t.Txn()
+ old, ok := txn.Insert(k, v)
+ return txn.Commit(), old, ok
+}
+
+// Delete is used to delete a given key. Returns the new tree,
+// old value if any, and a bool indicating if the key was set.
+func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) {
+ txn := t.Txn()
+ old, ok := txn.Delete(k)
+ return txn.Commit(), old, ok
+}
+
+// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree,
+// and a bool indicating if the prefix matched any nodes
+func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) {
+ txn := t.Txn()
+ ok := txn.DeletePrefix(k)
+ return txn.Commit(), ok
+}
+
+// Root returns the root node of the tree which can be used for richer
+// query operations.
+func (t *Tree) Root() *Node {
+ return t.root
+}
+
+// Get is used to lookup a specific key, returning
+// the value and if it was found
+func (t *Tree) Get(k []byte) (interface{}, bool) {
+ return t.root.Get(k)
+}
+
+// longestPrefix finds the length of the shared prefix
+// of two strings
+func longestPrefix(k1, k2 []byte) int {
+ max := len(k1)
+ if l := len(k2); l < max {
+ max = l
+ }
+ var i int
+ for i = 0; i < max; i++ {
+ if k1[i] != k2[i] {
+ break
+ }
+ }
+ return i
+}
+
+// concat two byte slices, returning a third new copy
+func concat(a, b []byte) []byte {
+ c := make([]byte, len(a)+len(b))
+ copy(c, a)
+ copy(c[len(a):], b)
+ return c
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iter.go b/vendor/github.com/hashicorp/go-immutable-radix/iter.go
new file mode 100644
index 0000000..1ecaf83
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/iter.go
@@ -0,0 +1,188 @@
+package iradix
+
+import (
+ "bytes"
+)
+
+// Iterator is used to iterate over a set of nodes
+// in pre-order
+type Iterator struct {
+ node *Node
+ stack []edges
+}
+
+// SeekPrefixWatch is used to seek the iterator to a given prefix
+// and returns the watch channel of the finest granularity
+func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
+ // Wipe the stack
+ i.stack = nil
+ n := i.node
+ watch = n.mutateCh
+ search := prefix
+ for {
+ // Check for key exhaustion
+ if len(search) == 0 {
+ i.node = n
+ return
+ }
+
+ // Look for an edge
+ _, n = n.getEdge(search[0])
+ if n == nil {
+ i.node = nil
+ return
+ }
+
+ // Update to the finest granularity as the search makes progress
+ watch = n.mutateCh
+
+ // Consume the search prefix
+ if bytes.HasPrefix(search, n.prefix) {
+ search = search[len(n.prefix):]
+
+ } else if bytes.HasPrefix(n.prefix, search) {
+ i.node = n
+ return
+ } else {
+ i.node = nil
+ return
+ }
+ }
+}
+
+// SeekPrefix is used to seek the iterator to a given prefix
+func (i *Iterator) SeekPrefix(prefix []byte) {
+ i.SeekPrefixWatch(prefix)
+}
+
+func (i *Iterator) recurseMin(n *Node) *Node {
+ // Traverse to the minimum child
+ if n.leaf != nil {
+ return n
+ }
+ if len(n.edges) > 0 {
+ // Add all the other edges to the stack (the min node will be added as
+ // we recurse)
+ i.stack = append(i.stack, n.edges[1:])
+ return i.recurseMin(n.edges[0].node)
+ }
+ // Shouldn't be possible
+ return nil
+}
+
+// SeekLowerBound is used to seek the iterator to the smallest key that is
+// greater or equal to the given key. There is no watch variant as it's hard to
+// predict based on the radix structure which node(s) changes might affect the
+// result.
+func (i *Iterator) SeekLowerBound(key []byte) {
+ // Wipe the stack. Unlike Prefix iteration, we need to build the stack as we
+ // go because we need only a subset of edges of many nodes in the path to the
+ // leaf with the lower bound.
+ i.stack = []edges{}
+ n := i.node
+ search := key
+
+ found := func(n *Node) {
+ i.node = n
+ i.stack = append(i.stack, edges{edge{node: n}})
+ }
+
+ for {
+ // Compare current prefix with the search key's same-length prefix.
+ var prefixCmp int
+ if len(n.prefix) < len(search) {
+ prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)])
+ } else {
+ prefixCmp = bytes.Compare(n.prefix, search)
+ }
+
+ if prefixCmp > 0 {
+ // Prefix is larger, that means the lower bound is greater than the search
+ // and from now on we need to follow the minimum path to the smallest
+ // leaf under this subtree.
+ n = i.recurseMin(n)
+ if n != nil {
+ found(n)
+ }
+ return
+ }
+
+ if prefixCmp < 0 {
+ // Prefix is smaller than search prefix, that means there is no lower
+ // bound
+ i.node = nil
+ return
+ }
+
+ // Prefix is equal, we are still heading for an exact match. If this is a
+ // leaf we're done.
+ if n.leaf != nil {
+ if bytes.Compare(n.leaf.key, key) < 0 {
+ i.node = nil
+ return
+ }
+ found(n)
+ return
+ }
+
+ // Consume the search prefix
+ if len(n.prefix) > len(search) {
+ search = []byte{}
+ } else {
+ search = search[len(n.prefix):]
+ }
+
+ // Otherwise, take the lower bound next edge.
+ idx, lbNode := n.getLowerBoundEdge(search[0])
+ if lbNode == nil {
+ i.node = nil
+ return
+ }
+
+ // Create stack edges for the all strictly higher edges in this node.
+ if idx+1 < len(n.edges) {
+ i.stack = append(i.stack, n.edges[idx+1:])
+ }
+
+ i.node = lbNode
+ // Recurse
+ n = lbNode
+ }
+}
+
+// Next returns the next node in order
+func (i *Iterator) Next() ([]byte, interface{}, bool) {
+ // Initialize our stack if needed
+ if i.stack == nil && i.node != nil {
+ i.stack = []edges{
+ edges{
+ edge{node: i.node},
+ },
+ }
+ }
+
+ for len(i.stack) > 0 {
+ // Inspect the last element of the stack
+ n := len(i.stack)
+ last := i.stack[n-1]
+ elem := last[0].node
+
+ // Update the stack
+ if len(last) > 1 {
+ i.stack[n-1] = last[1:]
+ } else {
+ i.stack = i.stack[:n-1]
+ }
+
+ // Push the edges onto the frontier
+ if len(elem.edges) > 0 {
+ i.stack = append(i.stack, elem.edges)
+ }
+
+ // Return the leaf values if any
+ if elem.leaf != nil {
+ return elem.leaf.key, elem.leaf.val, true
+ }
+ }
+ return nil, nil, false
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/node.go b/vendor/github.com/hashicorp/go-immutable-radix/node.go
new file mode 100644
index 0000000..3ab904e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/node.go
@@ -0,0 +1,309 @@
+package iradix
+
+import (
+ "bytes"
+ "sort"
+)
+
+// WalkFn is used when walking the tree. Takes a
+// key and value, returning if iteration should
+// be terminated.
+type WalkFn func(k []byte, v interface{}) bool
+
+// leafNode is used to represent a value
+type leafNode struct {
+ mutateCh chan struct{}
+ key []byte
+ val interface{}
+}
+
+// edge is used to represent an edge node
+type edge struct {
+ label byte
+ node *Node
+}
+
+// Node is an immutable node in the radix tree
+type Node struct {
+ // mutateCh is closed if this node is modified
+ mutateCh chan struct{}
+
+ // leaf is used to store possible leaf
+ leaf *leafNode
+
+ // prefix is the common prefix we ignore
+ prefix []byte
+
+ // Edges should be stored in-order for iteration.
+ // We avoid a fully materialized slice to save memory,
+ // since in most cases we expect to be sparse
+ edges edges
+}
+
+func (n *Node) isLeaf() bool {
+ return n.leaf != nil
+}
+
+func (n *Node) addEdge(e edge) {
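+ // Binary-search for the insertion point, append the new edge, then
+ // shift it into place so the edges stay sorted by label.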
+ num := len(n.edges)
+ idx := sort.Search(num, func(i int) bool {
+ return n.edges[i].label >= e.label
+ })
+ n.edges = append(n.edges, e)
+ if idx != num {
+ copy(n.edges[idx+1:], n.edges[idx:num])
+ n.edges[idx] = e
+ }
+}
+
+func (n *Node) replaceEdge(e edge) {
+ num := len(n.edges)
+ idx := sort.Search(num, func(i int) bool {
+ return n.edges[i].label >= e.label
+ })
+ if idx < num && n.edges[idx].label == e.label {
+ n.edges[idx].node = e.node
+ return
+ }
+ panic("replacing missing edge")
+}
+
+func (n *Node) getEdge(label byte) (int, *Node) {
+ num := len(n.edges)
+ idx := sort.Search(num, func(i int) bool {
+ return n.edges[i].label >= label
+ })
+ if idx < num && n.edges[idx].label == label {
+ return idx, n.edges[idx].node
+ }
+ return -1, nil
+}
+
+func (n *Node) getLowerBoundEdge(label byte) (int, *Node) {
+ num := len(n.edges)
+ idx := sort.Search(num, func(i int) bool {
+ return n.edges[i].label >= label
+ })
+ // we want lower bound behavior so return even if it's not an exact match
+ if idx < num {
+ return idx, n.edges[idx].node
+ }
+ return -1, nil
+}
+
+func (n *Node) delEdge(label byte) {
+ num := len(n.edges)
+ idx := sort.Search(num, func(i int) bool {
+ return n.edges[i].label >= label
+ })
+ if idx < num && n.edges[idx].label == label {
+ copy(n.edges[idx:], n.edges[idx+1:])
+ n.edges[len(n.edges)-1] = edge{}
+ n.edges = n.edges[:len(n.edges)-1]
+ }
+}
+
+func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
+ search := k
+ watch := n.mutateCh
+ for {
+ // Check for key exhaustion
+ if len(search) == 0 {
+ if n.isLeaf() {
+ return n.leaf.mutateCh, n.leaf.val, true
+ }
+ break
+ }
+
+ // Look for an edge
+ _, n = n.getEdge(search[0])
+ if n == nil {
+ break
+ }
+
+ // Update to the finest granularity as the search makes progress
+ watch = n.mutateCh
+
+ // Consume the search prefix
+ if bytes.HasPrefix(search, n.prefix) {
+ search = search[len(n.prefix):]
+ } else {
+ break
+ }
+ }
+ return watch, nil, false
+}
+
+func (n *Node) Get(k []byte) (interface{}, bool) {
+ _, val, ok := n.GetWatch(k)
+ return val, ok
+}
+
+// LongestPrefix is like Get, but instead of an
+// exact match, it will return the longest prefix match.
+func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) {
+ var last *leafNode
+ search := k
+ for {
+ // Look for a leaf node
+ if n.isLeaf() {
+ last = n.leaf
+ }
+
+ // Check for key exhaustion
+ if len(search) == 0 {
+ break
+ }
+
+ // Look for an edge
+ _, n = n.getEdge(search[0])
+ if n == nil {
+ break
+ }
+
+ // Consume the search prefix
+ if bytes.HasPrefix(search, n.prefix) {
+ search = search[len(n.prefix):]
+ } else {
+ break
+ }
+ }
+ if last != nil {
+ return last.key, last.val, true
+ }
+ return nil, nil, false
+}
+
+// Minimum is used to return the minimum value in the tree
+func (n *Node) Minimum() ([]byte, interface{}, bool) {
+ for {
+ if n.isLeaf() {
+ return n.leaf.key, n.leaf.val, true
+ }
+ if len(n.edges) > 0 {
+ n = n.edges[0].node
+ } else {
+ break
+ }
+ }
+ return nil, nil, false
+}
+
+// Maximum is used to return the maximum value in the tree
+func (n *Node) Maximum() ([]byte, interface{}, bool) {
+ for {
+ if num := len(n.edges); num > 0 {
+ n = n.edges[num-1].node
+ continue
+ }
+ if n.isLeaf() {
+ return n.leaf.key, n.leaf.val, true
+ } else {
+ break
+ }
+ }
+ return nil, nil, false
+}
+
+// Iterator is used to return an iterator at
+// the given node to walk the tree
+func (n *Node) Iterator() *Iterator {
+ return &Iterator{node: n}
+}
+
+// rawIterator is used to return a raw iterator at the given node to walk the
+// tree.
+func (n *Node) rawIterator() *rawIterator {
+ iter := &rawIterator{node: n}
+ iter.Next()
+ return iter
+}
+
+// Walk is used to walk the tree
+func (n *Node) Walk(fn WalkFn) {
+ recursiveWalk(n, fn)
+}
+
+// WalkPrefix is used to walk the tree under a prefix
+func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) {
+ search := prefix
+ for {
+ // Check for key exhaustion
+ if len(search) == 0 {
+ recursiveWalk(n, fn)
+ return
+ }
+
+ // Look for an edge
+ _, n = n.getEdge(search[0])
+ if n == nil {
+ break
+ }
+
+ // Consume the search prefix
+ if bytes.HasPrefix(search, n.prefix) {
+ search = search[len(n.prefix):]
+
+ } else if bytes.HasPrefix(n.prefix, search) {
+ // Child may be under our search prefix
+ recursiveWalk(n, fn)
+ return
+ } else {
+ break
+ }
+ }
+}
+
+// WalkPath is used to walk the tree, but only visiting nodes
+// from the root down to a given leaf. Where WalkPrefix walks
+// all the entries *under* the given prefix, this walks the
+// entries *above* the given prefix.
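+//
+// For example, with keys "f", "foo" and "foobar" in the tree, walking the
+// path "foobar" visits exactly those three entries, in order.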
+func (n *Node) WalkPath(path []byte, fn WalkFn) {
+ search := path
+ for {
+ // Visit the leaf values if any
+ if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+ return
+ }
+
+ // Check for key exhaustion
+ if len(search) == 0 {
+ return
+ }
+
+ // Look for an edge
+ _, n = n.getEdge(search[0])
+ if n == nil {
+ return
+ }
+
+ // Consume the search prefix
+ if bytes.HasPrefix(search, n.prefix) {
+ search = search[len(n.prefix):]
+ } else {
+ break
+ }
+ }
+}
+
+// recursiveWalk is used to do a pre-order walk of a node
+// recursively. Returns true if the walk should be aborted
+func recursiveWalk(n *Node, fn WalkFn) bool {
+ // Visit the leaf values if any
+ if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+ return true
+ }
+
+ // Recurse on the children
+ for _, e := range n.edges {
+ if recursiveWalk(e.node, fn) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go
new file mode 100644
index 0000000..04814c1
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go
@@ -0,0 +1,78 @@
+package iradix
+
+// rawIterator visits each of the nodes in the tree, even the ones that are not
+// leaves. It keeps track of the effective path (what a leaf at a given node
+// would be called), which is useful for comparing trees.
+type rawIterator struct {
+ // node is the starting node in the tree for the iterator.
+ node *Node
+
+ // stack keeps track of edges in the frontier.
+ stack []rawStackEntry
+
+ // pos is the current position of the iterator.
+ pos *Node
+
+ // path is the effective path of the current iterator position,
+ // regardless of whether the current node is a leaf.
+ path string
+}
+
+// rawStackEntry is used to keep track of the cumulative common path as well as
+// its associated edges in the frontier.
+type rawStackEntry struct {
+ path string
+ edges edges
+}
+
+// Front returns the current node that has been iterated to.
+func (i *rawIterator) Front() *Node {
+ return i.pos
+}
+
+// Path returns the effective path of the current node, even if it's not actually
+// a leaf.
+func (i *rawIterator) Path() string {
+ return i.path
+}
+
+// Next advances the iterator to the next node.
+func (i *rawIterator) Next() {
+ // Initialize our stack if needed.
+ if i.stack == nil && i.node != nil {
+ i.stack = []rawStackEntry{
+ rawStackEntry{
+ edges: edges{
+ edge{node: i.node},
+ },
+ },
+ }
+ }
+
+ for len(i.stack) > 0 {
+ // Inspect the last element of the stack.
+ n := len(i.stack)
+ last := i.stack[n-1]
+ elem := last.edges[0].node
+
+ // Update the stack.
+ if len(last.edges) > 1 {
+ i.stack[n-1].edges = last.edges[1:]
+ } else {
+ i.stack = i.stack[:n-1]
+ }
+
+ // Push the edges onto the frontier.
+ if len(elem.edges) > 0 {
+ path := last.path + string(elem.prefix)
+ i.stack = append(i.stack, rawStackEntry{path, elem.edges})
+ }
+
+ i.pos = elem
+ i.path = last.path + string(elem.prefix)
+ return
+ }
+
+ i.pos = nil
+ i.path = ""
+}
diff --git a/vendor/github.com/hashicorp/go-rootcerts/.travis.yml b/vendor/github.com/hashicorp/go-rootcerts/.travis.yml
new file mode 100644
index 0000000..80e1de4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/.travis.yml
@@ -0,0 +1,12 @@
+sudo: false
+
+language: go
+
+go:
+ - 1.6
+
+branches:
+ only:
+ - master
+
+script: make test
diff --git a/vendor/github.com/hashicorp/go-rootcerts/LICENSE b/vendor/github.com/hashicorp/go-rootcerts/LICENSE
new file mode 100644
index 0000000..e87a115
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/LICENSE
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-rootcerts/Makefile b/vendor/github.com/hashicorp/go-rootcerts/Makefile
new file mode 100644
index 0000000..c3989e7
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/Makefile
@@ -0,0 +1,8 @@
+TEST?=./...
+
+test:
+ go test $(TEST) $(TESTARGS) -timeout=3s -parallel=4
+ go vet $(TEST)
+ go test $(TEST) -race
+
+.PHONY: test
diff --git a/vendor/github.com/hashicorp/go-rootcerts/README.md b/vendor/github.com/hashicorp/go-rootcerts/README.md
new file mode 100644
index 0000000..6a128e1
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/README.md
@@ -0,0 +1,59 @@
+# rootcerts
+
+Functions for loading root certificates for TLS connections.
+
+-----
+
+Go's standard library `crypto/tls` provides a common mechanism for configuring
+TLS connections in `tls.Config`. The `RootCAs` field on this struct is a pool
+of certificates for the client to use as a trust store when verifying server
+certificates.
+
+This library contains utility functions for loading certificates destined for
+that field, as well as one other important thing:
+
+When the `RootCAs` field is `nil`, the standard library attempts to load the
+host's root CA set. This behavior is OS-specific, and the Darwin
+implementation contains [a bug that prevents trusted certificates from the
+System and Login keychains from being loaded][1]. This library contains
+Darwin-specific behavior that works around that bug.
+
+[1]: https://github.com/golang/go/issues/14514
+
+## Example Usage
+
+Here's a snippet demonstrating how this library is meant to be used:
+
+```go
+func httpClient() (*http.Client, error) {
+ tlsConfig := &tls.Config{}
+ err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{
+ CAFile: os.Getenv("MYAPP_CAFILE"),
+ CAPath: os.Getenv("MYAPP_CAPATH"),
+ CACertificate: []byte(os.Getenv("MYAPP_CERTIFICATE")),
+ })
+ if err != nil {
+ return nil, err
+ }
+ c := cleanhttp.DefaultClient()
+ t := cleanhttp.DefaultTransport()
+ t.TLSClientConfig = tlsConfig
+ c.Transport = t
+ return c, nil
+}
+```
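+
+`LoadCACerts` resolves the sources in precedence order: `CAFile`, then
+`CACertificate`, then `CAPath`, falling back to the host's root CA set when
+all three are empty. A minimal sketch of loading a pool directly, reusing the
+`MYAPP_CAPATH` variable from above:
+
+```go
+pool, err := rootcerts.LoadCACerts(&rootcerts.Config{
+ CAPath: os.Getenv("MYAPP_CAPATH"),
+})
+if err != nil {
+ log.Fatal(err)
+}
+tlsConfig := &tls.Config{RootCAs: pool}
+```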
diff --git a/vendor/github.com/hashicorp/go-rootcerts/doc.go b/vendor/github.com/hashicorp/go-rootcerts/doc.go
new file mode 100644
index 0000000..b55cc62
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/doc.go
@@ -0,0 +1,9 @@
+// Package rootcerts contains functions to aid in loading CA certificates for
+// TLS connections.
+//
+// In addition, its default behavior on Darwin works around an open issue [1]
+// in Go's crypto/x509 that prevents certificates from being loaded from the
+// System or Login keychains.
+//
+// [1] https://github.com/golang/go/issues/14514
+package rootcerts
diff --git a/vendor/github.com/hashicorp/go-rootcerts/go.mod b/vendor/github.com/hashicorp/go-rootcerts/go.mod
new file mode 100644
index 0000000..e2dd024
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/go.mod
@@ -0,0 +1,5 @@
+module github.com/hashicorp/go-rootcerts
+
+go 1.12
+
+require github.com/mitchellh/go-homedir v1.1.0
diff --git a/vendor/github.com/hashicorp/go-rootcerts/go.sum b/vendor/github.com/hashicorp/go-rootcerts/go.sum
new file mode 100644
index 0000000..ae38d14
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/go.sum
@@ -0,0 +1,2 @@
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go
new file mode 100644
index 0000000..69aabd6
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go
@@ -0,0 +1,123 @@
+package rootcerts
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+// Config determines where LoadCACerts will load certificates from. When CAFile,
+// CACertificate and CAPath are blank, this library's functions will either load
+// system roots explicitly and return them, or set the CertPool to nil to allow
+// Go's standard library to load system certs.
+type Config struct {
+ // CAFile is a path to a PEM-encoded certificate file or bundle. Takes
+ // precedence over CACertificate and CAPath.
+ CAFile string
+
+ // CACertificate is a PEM-encoded certificate or bundle. Takes precedence
+ // over CAPath.
+ CACertificate []byte
+
+ // CAPath is a path to a directory populated with PEM-encoded certificates.
+ CAPath string
+}
+
+// ConfigureTLS sets up the RootCAs on the provided tls.Config based on the
+// Config specified.
+func ConfigureTLS(t *tls.Config, c *Config) error {
+ if t == nil {
+ return nil
+ }
+ pool, err := LoadCACerts(c)
+ if err != nil {
+ return err
+ }
+ t.RootCAs = pool
+ return nil
+}
+
+// LoadCACerts loads a CertPool based on the Config specified.
+func LoadCACerts(c *Config) (*x509.CertPool, error) {
+ if c == nil {
+ c = &Config{}
+ }
+ if c.CAFile != "" {
+ return LoadCAFile(c.CAFile)
+ }
+ if len(c.CACertificate) != 0 {
+ return AppendCertificate(c.CACertificate)
+ }
+ if c.CAPath != "" {
+ return LoadCAPath(c.CAPath)
+ }
+
+ return LoadSystemCAs()
+}
+
+// LoadCAFile loads a single PEM-encoded file from the path specified.
+func LoadCAFile(caFile string) (*x509.CertPool, error) {
+ pool := x509.NewCertPool()
+
+ pem, err := ioutil.ReadFile(caFile)
+ if err != nil {
+ return nil, fmt.Errorf("Error loading CA File: %s", err)
+ }
+
+ ok := pool.AppendCertsFromPEM(pem)
+ if !ok {
+ return nil, fmt.Errorf("Error loading CA File: Couldn't parse PEM in: %s", caFile)
+ }
+
+ return pool, nil
+}
+
+// AppendCertificate appends an in-memory PEM-encoded certificate or bundle and returns a pool.
+func AppendCertificate(ca []byte) (*x509.CertPool, error) {
+ pool := x509.NewCertPool()
+
+ ok := pool.AppendCertsFromPEM(ca)
+ if !ok {
+ return nil, errors.New("Error appending CA: Couldn't parse PEM")
+ }
+
+ return pool, nil
+}
+
+// LoadCAPath walks the provided path and loads all certificates encountered into
+// a pool.
+func LoadCAPath(caPath string) (*x509.CertPool, error) {
+ pool := x509.NewCertPool()
+ walkFn := func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if info.IsDir() {
+ return nil
+ }
+
+ pem, err := ioutil.ReadFile(path)
+ if err != nil {
+ return fmt.Errorf("Error loading file from CAPath: %s", err)
+ }
+
+ ok := pool.AppendCertsFromPEM(pem)
+ if !ok {
+ return fmt.Errorf("Error loading CA Path: Couldn't parse PEM in: %s", path)
+ }
+
+ return nil
+ }
+
+ err := filepath.Walk(caPath, walkFn)
+ if err != nil {
+ return nil, err
+ }
+
+ return pool, nil
+}
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go
new file mode 100644
index 0000000..66b1472
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go
@@ -0,0 +1,12 @@
+// +build !darwin
+
+package rootcerts
+
+import "crypto/x509"
+
+// LoadSystemCAs does nothing on non-Darwin systems. We return nil so that
+// default behavior of standard TLS config libraries is triggered, which is to
+// load system certs.
+func LoadSystemCAs() (*x509.CertPool, error) {
+ return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go
new file mode 100644
index 0000000..a9a0406
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go
@@ -0,0 +1,48 @@
+package rootcerts
+
+import (
+ "crypto/x509"
+ "os/exec"
+ "path"
+
+ "github.com/mitchellh/go-homedir"
+)
+
+// LoadSystemCAs loads certificates directly from the Darwin system keychains, working around the keychain-loading bug in crypto/x509.
+func LoadSystemCAs() (*x509.CertPool, error) {
+ pool := x509.NewCertPool()
+
+ for _, keychain := range certKeychains() {
+ err := addCertsFromKeychain(pool, keychain)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return pool, nil
+}
+
+func addCertsFromKeychain(pool *x509.CertPool, keychain string) error {
+ cmd := exec.Command("/usr/bin/security", "find-certificate", "-a", "-p", keychain)
+ data, err := cmd.Output()
+ if err != nil {
+ return err
+ }
+
+ pool.AppendCertsFromPEM(data)
+
+ return nil
+}
+
+func certKeychains() []string {
+ keychains := []string{
+ "/System/Library/Keychains/SystemRootCertificates.keychain",
+ "/Library/Keychains/System.keychain",
+ }
+ home, err := homedir.Dir()
+ if err == nil {
+ loginKeychain := path.Join(home, "Library", "Keychains", "login.keychain")
+ keychains = append(keychains, loginKeychain)
+ }
+ return keychains
+}
diff --git a/vendor/github.com/hashicorp/go-uuid/.travis.yml b/vendor/github.com/hashicorp/go-uuid/.travis.yml
new file mode 100644
index 0000000..7698490
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-uuid/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+
+sudo: false
+
+go:
+ - 1.4
+ - 1.5
+ - 1.6
+ - tip
+
+script:
+ - go test -bench . -benchmem -v ./...
diff --git a/vendor/github.com/hashicorp/go-uuid/LICENSE b/vendor/github.com/hashicorp/go-uuid/LICENSE
new file mode 100644
index 0000000..e87a115
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-uuid/LICENSE
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-uuid/README.md b/vendor/github.com/hashicorp/go-uuid/README.md
new file mode 100644
index 0000000..fbde8b9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-uuid/README.md
@@ -0,0 +1,8 @@
+# uuid [![Build Status](https://travis-ci.org/hashicorp/go-uuid.svg?branch=master)](https://travis-ci.org/hashicorp/go-uuid)
+
+Generates UUID-format strings using high quality, _purely random_ bytes. It is **not** intended to be RFC compliant, merely to use a well-understood string representation of a 128-bit value. It can also parse UUID-format strings into their component bytes.
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-uuid).
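+
+Example
+=======
+
+A minimal round-trip sketch (editorial addition; error handling elided for
+brevity):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/go-uuid"
+)
+
+func main() {
+	id, _ := uuid.GenerateUUID() // e.g. "0cd09f0f-...-..."
+	fmt.Println(id)
+
+	raw, _ := uuid.ParseUUID(id) // recover the 16 underlying bytes
+	fmt.Println(len(raw))        // 16
+}
+```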
diff --git a/vendor/github.com/hashicorp/go-uuid/go.mod b/vendor/github.com/hashicorp/go-uuid/go.mod
new file mode 100644
index 0000000..dd57f9d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-uuid/go.mod
@@ -0,0 +1 @@
+module github.com/hashicorp/go-uuid
diff --git a/vendor/github.com/hashicorp/go-uuid/uuid.go b/vendor/github.com/hashicorp/go-uuid/uuid.go
new file mode 100644
index 0000000..0c10c4e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-uuid/uuid.go
@@ -0,0 +1,83 @@
+package uuid
+
+import (
+ "crypto/rand"
+ "encoding/hex"
+ "fmt"
+ "io"
+)
+
+// GenerateRandomBytes is used to generate random bytes of the given size.
+func GenerateRandomBytes(size int) ([]byte, error) {
+ return GenerateRandomBytesWithReader(size, rand.Reader)
+}
+
+// GenerateRandomBytesWithReader is used to generate random bytes of the
+// given size, read from the given reader.
+func GenerateRandomBytesWithReader(size int, reader io.Reader) ([]byte, error) {
+ if reader == nil {
+ return nil, fmt.Errorf("provided reader is nil")
+ }
+ buf := make([]byte, size)
+ if _, err := io.ReadFull(reader, buf); err != nil {
+ return nil, fmt.Errorf("failed to read random bytes: %v", err)
+ }
+ return buf, nil
+}
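+
+// Editorial sketch: any io.Reader may be supplied; for reproducible output in
+// tests, a deterministic source such as math/rand with a fixed seed works,
+// since *rand.Rand implements io.Reader:
+//
+//    r := rand.New(rand.NewSource(1)) // math/rand; seed chosen arbitrarily
+//    buf, _ := GenerateRandomBytesWithReader(16, r)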
+
+const uuidLen = 16
+
+// GenerateUUID is used to generate a random UUID
+func GenerateUUID() (string, error) {
+ return GenerateUUIDWithReader(rand.Reader)
+}
+
+// GenerateUUIDWithReader is used to generate a random UUID with a given Reader
+func GenerateUUIDWithReader(reader io.Reader) (string, error) {
+ if reader == nil {
+ return "", fmt.Errorf("provided reader is nil")
+ }
+ buf, err := GenerateRandomBytesWithReader(uuidLen, reader)
+ if err != nil {
+ return "", err
+ }
+ return FormatUUID(buf)
+}
+
+// FormatUUID encodes a 16-byte slice as a canonical hyphenated UUID-format
+// string.
+func FormatUUID(buf []byte) (string, error) {
+ if buflen := len(buf); buflen != uuidLen {
+ return "", fmt.Errorf("wrong length byte slice (%d)", buflen)
+ }
+
+ return fmt.Sprintf("%x-%x-%x-%x-%x",
+ buf[0:4],
+ buf[4:6],
+ buf[6:8],
+ buf[8:10],
+ buf[10:16]), nil
+}
+
+// ParseUUID decodes a UUID-format string back into its 16 component bytes.
+func ParseUUID(uuid string) ([]byte, error) {
+ if len(uuid) != 2*uuidLen+4 {
+ return nil, fmt.Errorf("uuid string is wrong length")
+ }
+
+ if uuid[8] != '-' ||
+ uuid[13] != '-' ||
+ uuid[18] != '-' ||
+ uuid[23] != '-' {
+ return nil, fmt.Errorf("uuid is improperly formatted")
+ }
+
+ hexStr := uuid[0:8] + uuid[9:13] + uuid[14:18] + uuid[19:23] + uuid[24:36]
+
+ ret, err := hex.DecodeString(hexStr)
+ if err != nil {
+ return nil, err
+ }
+ if len(ret) != uuidLen {
+ return nil, fmt.Errorf("decoded hex is the wrong length")
+ }
+
+ return ret, nil
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE
new file mode 100644
index 0000000..be2cc4d
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/LICENSE
@@ -0,0 +1,362 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
new file mode 100644
index 0000000..a86c853
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
@@ -0,0 +1,177 @@
+package simplelru
+
+import (
+ "container/list"
+ "errors"
+)
+
+// EvictCallback is used to get a callback when a cache entry is evicted
+type EvictCallback func(key interface{}, value interface{})
+
+// LRU implements a non-thread safe fixed size LRU cache
+type LRU struct {
+ size int
+ evictList *list.List
+ items map[interface{}]*list.Element
+ onEvict EvictCallback
+}
+
+// entry is used to hold a value in the evictList
+type entry struct {
+ key interface{}
+ value interface{}
+}
+
+// NewLRU constructs an LRU of the given size
+func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
+ if size <= 0 {
+ return nil, errors.New("Must provide a positive size")
+ }
+ c := &LRU{
+ size: size,
+ evictList: list.New(),
+ items: make(map[interface{}]*list.Element),
+ onEvict: onEvict,
+ }
+ return c, nil
+}
+
+// Purge is used to completely clear the cache.
+func (c *LRU) Purge() {
+ for k, v := range c.items {
+ if c.onEvict != nil {
+ c.onEvict(k, v.Value.(*entry).value)
+ }
+ delete(c.items, k)
+ }
+ c.evictList.Init()
+}
+
+// Add adds a value to the cache. Returns true if an eviction occurred.
+func (c *LRU) Add(key, value interface{}) (evicted bool) {
+ // Check for existing item
+ if ent, ok := c.items[key]; ok {
+ c.evictList.MoveToFront(ent)
+ ent.Value.(*entry).value = value
+ return false
+ }
+
+ // Add new item
+ ent := &entry{key, value}
+ elem := c.evictList.PushFront(ent)
+ c.items[key] = elem
+
+ evict := c.evictList.Len() > c.size
+ // Verify size not exceeded
+ if evict {
+ c.removeOldest()
+ }
+ return evict
+}
+
+// Get looks up a key's value from the cache.
+func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
+ if ent, ok := c.items[key]; ok {
+ c.evictList.MoveToFront(ent)
+ // Guard against a nil entry having been stored in the list.
+ if ent.Value.(*entry) == nil {
+ return nil, false
+ }
+ return ent.Value.(*entry).value, true
+ }
+ return
+}
+
+// Contains checks if a key is in the cache, without updating the recent-ness
+// or deleting it for being stale.
+func (c *LRU) Contains(key interface{}) (ok bool) {
+ _, ok = c.items[key]
+ return ok
+}
+
+// Peek returns the key's value (or nil if not found) without updating
+// the "recently used"-ness of the key.
+func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
+ var ent *list.Element
+ if ent, ok = c.items[key]; ok {
+ return ent.Value.(*entry).value, true
+ }
+ return nil, ok
+}
+
+// Remove removes the provided key from the cache, returning if the
+// key was contained.
+func (c *LRU) Remove(key interface{}) (present bool) {
+ if ent, ok := c.items[key]; ok {
+ c.removeElement(ent)
+ return true
+ }
+ return false
+}
+
+// RemoveOldest removes the oldest item from the cache.
+func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
+ ent := c.evictList.Back()
+ if ent != nil {
+ c.removeElement(ent)
+ kv := ent.Value.(*entry)
+ return kv.key, kv.value, true
+ }
+ return nil, nil, false
+}
+
+// GetOldest returns the oldest entry
+func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) {
+ ent := c.evictList.Back()
+ if ent != nil {
+ kv := ent.Value.(*entry)
+ return kv.key, kv.value, true
+ }
+ return nil, nil, false
+}
+
+// Keys returns a slice of the keys in the cache, from oldest to newest.
+func (c *LRU) Keys() []interface{} {
+ keys := make([]interface{}, len(c.items))
+ i := 0
+ for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() {
+ keys[i] = ent.Value.(*entry).key
+ i++
+ }
+ return keys
+}
+
+// Len returns the number of items in the cache.
+func (c *LRU) Len() int {
+ return c.evictList.Len()
+}
+
+// Resize changes the cache size.
+func (c *LRU) Resize(size int) (evicted int) {
+ diff := c.Len() - size
+ if diff < 0 {
+ diff = 0
+ }
+ for i := 0; i < diff; i++ {
+ c.removeOldest()
+ }
+ c.size = size
+ return diff
+}
+
+// removeOldest removes the oldest item from the cache.
+func (c *LRU) removeOldest() {
+ ent := c.evictList.Back()
+ if ent != nil {
+ c.removeElement(ent)
+ }
+}
+
+// removeElement is used to remove a given list element from the cache
+func (c *LRU) removeElement(e *list.Element) {
+ c.evictList.Remove(e)
+ kv := e.Value.(*entry)
+ delete(c.items, kv.key)
+ if c.onEvict != nil {
+ c.onEvict(kv.key, kv.value)
+ }
+}
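+
+// Editorial sketch (not upstream code): typical use. Note that LRU is not
+// safe for concurrent use; the locking wrapper lives in the parent
+// golang-lru package.
+//
+//    onEvict := func(k, v interface{}) { fmt.Println("evicted:", k) }
+//    cache, _ := simplelru.NewLRU(2, onEvict)
+//    cache.Add("a", 1)
+//    cache.Add("b", 2)
+//    cache.Add("c", 3)       // size exceeded: evicts "a", the oldest entry
+//    _, ok := cache.Get("a") // ok == false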
diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
new file mode 100644
index 0000000..92d7093
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
@@ -0,0 +1,39 @@
+package simplelru
+
+// LRUCache is the interface for a simple LRU cache.
+type LRUCache interface {
+ // Adds a value to the cache, updates the "recently used"-ness of the
+ // key, and returns true if an eviction occurred.
+ Add(key, value interface{}) bool
+
+ // Returns key's value from the cache and
+ // updates the "recently used"-ness of the key. #value, isFound
+ Get(key interface{}) (value interface{}, ok bool)
+
+ // Checks if a key exists in cache without updating the recent-ness.
+ Contains(key interface{}) (ok bool)
+
+ // Returns key's value without updating the "recently used"-ness of the key.
+ Peek(key interface{}) (value interface{}, ok bool)
+
+ // Removes a key from the cache.
+ Remove(key interface{}) bool
+
+ // Removes the oldest entry from cache.
+ RemoveOldest() (interface{}, interface{}, bool)
+
+ // Returns the oldest entry from the cache. #key, value, isFound
+ GetOldest() (interface{}, interface{}, bool)
+
+ // Returns a slice of the keys in the cache, from oldest to newest.
+ Keys() []interface{}
+
+ // Returns the number of items in the cache.
+ Len() int
+
+ // Clears all cache entries.
+ Purge()
+
+ // Resizes cache, returning number evicted
+ Resize(int) int
+}
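+
+// Editorial note: LRU in lru.go satisfies this interface, which can be
+// checked at compile time with:
+//
+//    var _ LRUCache = (*LRU)(nil)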
diff --git a/vendor/github.com/hashicorp/serf/LICENSE b/vendor/github.com/hashicorp/serf/LICENSE
new file mode 100644
index 0000000..c33dcc7
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/serf/coordinate/client.go b/vendor/github.com/hashicorp/serf/coordinate/client.go
new file mode 100644
index 0000000..3582ee4
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/coordinate/client.go
@@ -0,0 +1,243 @@
+package coordinate
+
+import (
+ "fmt"
+ "math"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/armon/go-metrics"
+)
+
+// Client manages the estimated network coordinate for a given node, and adjusts
+// it as the node observes round trip times and estimated coordinates from other
+// nodes. The core algorithm is based on Vivaldi, see the documentation for Config
+// for more details.
+type Client struct {
+ // coord is the current estimate of the client's network coordinate.
+ coord *Coordinate
+
+ // origin is a coordinate sitting at the origin.
+ origin *Coordinate
+
+ // config contains the tuning parameters that govern the performance of
+ // the algorithm.
+ config *Config
+
+ // adjustmentIndex is the current index into the adjustmentSamples slice.
+ adjustmentIndex uint
+
+ // adjustment is used to store samples for the adjustment calculation.
+ adjustmentSamples []float64
+
+ // latencyFilterSamples is used to store the last several RTT samples,
+ // keyed by node name. We will use the config's LatencyFilterSamples
+ // value to determine how many samples we keep, per node.
+ latencyFilterSamples map[string][]float64
+
+ // stats is used to record events that occur when updating coordinates.
+ stats ClientStats
+
+ // mutex enables safe concurrent access to the client.
+ mutex sync.RWMutex
+}
+
+// ClientStats is used to record events that occur when updating coordinates.
+type ClientStats struct {
+ // Resets is incremented any time we reset our local coordinate because
+ // our calculations have resulted in an invalid state.
+ Resets int
+}
+
+// NewClient creates a new Client and verifies the configuration is valid.
+func NewClient(config *Config) (*Client, error) {
+ if !(config.Dimensionality > 0) {
+ return nil, fmt.Errorf("dimensionality must be >0")
+ }
+
+ return &Client{
+ coord: NewCoordinate(config),
+ origin: NewCoordinate(config),
+ config: config,
+ adjustmentIndex: 0,
+ adjustmentSamples: make([]float64, config.AdjustmentWindowSize),
+ latencyFilterSamples: make(map[string][]float64),
+ }, nil
+}
+
+// GetCoordinate returns a copy of the coordinate for this client.
+func (c *Client) GetCoordinate() *Coordinate {
+ c.mutex.RLock()
+ defer c.mutex.RUnlock()
+
+ return c.coord.Clone()
+}
+
+// SetCoordinate forces the client's coordinate to a known state.
+func (c *Client) SetCoordinate(coord *Coordinate) error {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+
+ if err := c.checkCoordinate(coord); err != nil {
+ return err
+ }
+
+ c.coord = coord.Clone()
+ return nil
+}
+
+// ForgetNode removes any client state for the given node.
+func (c *Client) ForgetNode(node string) {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+
+ delete(c.latencyFilterSamples, node)
+}
+
+// Stats returns a copy of stats for the client.
+func (c *Client) Stats() ClientStats {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+
+ return c.stats
+}
+
+// checkCoordinate returns an error if the coordinate isn't compatible with
+// this client, or if the coordinate itself isn't valid. This assumes the mutex
+// has been locked already.
+func (c *Client) checkCoordinate(coord *Coordinate) error {
+ if !c.coord.IsCompatibleWith(coord) {
+ return fmt.Errorf("dimensions aren't compatible")
+ }
+
+ if !coord.IsValid() {
+ return fmt.Errorf("coordinate is invalid")
+ }
+
+ return nil
+}
+
+// latencyFilter applies a simple moving median filter with a new sample for
+// a node. This assumes that the mutex has been locked already.
+func (c *Client) latencyFilter(node string, rttSeconds float64) float64 {
+ samples, ok := c.latencyFilterSamples[node]
+ if !ok {
+ samples = make([]float64, 0, c.config.LatencyFilterSize)
+ }
+
+ // Add the new sample and trim the list, if needed.
+ samples = append(samples, rttSeconds)
+ if len(samples) > int(c.config.LatencyFilterSize) {
+ samples = samples[1:]
+ }
+ c.latencyFilterSamples[node] = samples
+
+ // Sort a copy of the samples and return the median.
+ sorted := make([]float64, len(samples))
+ copy(sorted, samples)
+ sort.Float64s(sorted)
+ return sorted[len(sorted)/2]
+}
+
+// updateVivaldi updates the Vivaldi portion of the client's coordinate. This
+// assumes that the mutex has been locked already.
+func (c *Client) updateVivaldi(other *Coordinate, rttSeconds float64) {
+ const zeroThreshold = 1.0e-6
+
+ dist := c.coord.DistanceTo(other).Seconds()
+ if rttSeconds < zeroThreshold {
+ rttSeconds = zeroThreshold
+ }
+ wrongness := math.Abs(dist-rttSeconds) / rttSeconds
+
+ totalError := c.coord.Error + other.Error
+ if totalError < zeroThreshold {
+ totalError = zeroThreshold
+ }
+ weight := c.coord.Error / totalError
+
+ c.coord.Error = c.config.VivaldiCE*weight*wrongness + c.coord.Error*(1.0-c.config.VivaldiCE*weight)
+ if c.coord.Error > c.config.VivaldiErrorMax {
+ c.coord.Error = c.config.VivaldiErrorMax
+ }
+
+ delta := c.config.VivaldiCC * weight
+ force := delta * (rttSeconds - dist)
+ c.coord = c.coord.ApplyForce(c.config, force, other)
+}
+
+// updateAdjustment updates the adjustment portion of the client's coordinate, if
+// the feature is enabled. This assumes that the mutex has been locked already.
+func (c *Client) updateAdjustment(other *Coordinate, rttSeconds float64) {
+ if c.config.AdjustmentWindowSize == 0 {
+ return
+ }
+
+ // Note that the existing adjustment factors don't figure into this
+ // calculation so we use the raw distance here.
+ dist := c.coord.rawDistanceTo(other)
+ c.adjustmentSamples[c.adjustmentIndex] = rttSeconds - dist
+ c.adjustmentIndex = (c.adjustmentIndex + 1) % c.config.AdjustmentWindowSize
+
+ sum := 0.0
+ for _, sample := range c.adjustmentSamples {
+ sum += sample
+ }
+ c.coord.Adjustment = sum / (2.0 * float64(c.config.AdjustmentWindowSize))
+}
+
+// updateGravity applies a small amount of gravity to pull coordinates towards
+// the center of the coordinate system to combat drift. This assumes that the
+// mutex is locked already.
+func (c *Client) updateGravity() {
+ dist := c.origin.DistanceTo(c.coord).Seconds()
+ force := -1.0 * math.Pow(dist/c.config.GravityRho, 2.0)
+ c.coord = c.coord.ApplyForce(c.config, force, c.origin)
+}
+
+// Update takes other, a coordinate for another node, and rtt, a round trip
+// time observation for a ping to that node, and updates the estimated position of
+// the client's coordinate. Returns the updated coordinate.
+func (c *Client) Update(node string, other *Coordinate, rtt time.Duration) (*Coordinate, error) {
+ c.mutex.Lock()
+ defer c.mutex.Unlock()
+
+ if err := c.checkCoordinate(other); err != nil {
+ return nil, err
+ }
+
+ // The code down below can handle zero RTTs, which we have seen in
+ // https://github.com/hashicorp/consul/issues/3789, presumably in
+ // environments with coarse-grained monotonic clocks (we are still
+ // trying to pin this down). In any event, this is ok from a code PoV
+ // so we don't need to alert operators with spammy messages. We did
+ // add a counter so this is still observable, though.
+ const maxRTT = 10 * time.Second
+ if rtt < 0 || rtt > maxRTT {
+ return nil, fmt.Errorf("round trip time not in valid range, duration %v is not a positive value less than %v ", rtt, maxRTT)
+ }
+ if rtt == 0 {
+ metrics.IncrCounter([]string{"serf", "coordinate", "zero-rtt"}, 1)
+ }
+
+ rttSeconds := c.latencyFilter(node, rtt.Seconds())
+ c.updateVivaldi(other, rttSeconds)
+ c.updateAdjustment(other, rttSeconds)
+ c.updateGravity()
+ if !c.coord.IsValid() {
+ c.stats.Resets++
+ c.coord = NewCoordinate(c.config)
+ }
+
+ return c.coord.Clone(), nil
+}
+
+// DistanceTo returns the estimated RTT from the client's coordinate to other, the
+// coordinate for another node.
+func (c *Client) DistanceTo(other *Coordinate) time.Duration {
+ c.mutex.RLock()
+ defer c.mutex.RUnlock()
+
+ return c.coord.DistanceTo(other)
+}
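
The exported surface above (NewClient, Update, DistanceTo) composes as in this minimal sketch; the node name and RTT are illustrative values, and the import path follows the vendored directory:

```go
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/serf/coordinate"
)

func main() {
	client, err := coordinate.NewClient(coordinate.DefaultConfig())
	if err != nil {
		panic(err)
	}

	// Feed an observed 25ms round trip against the peer's advertised
	// coordinate; Update runs the median filter, the Vivaldi step, the
	// adjustment term, and gravity, then returns the updated coordinate.
	peer := coordinate.NewCoordinate(coordinate.DefaultConfig())
	if _, err := client.Update("node-a", peer, 25*time.Millisecond); err != nil {
		panic(err)
	}

	// Estimate the RTT to the peer without pinging it again.
	fmt.Println(client.DistanceTo(peer))
}
```
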
diff --git a/vendor/github.com/hashicorp/serf/coordinate/config.go b/vendor/github.com/hashicorp/serf/coordinate/config.go
new file mode 100644
index 0000000..b85a8ab
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/coordinate/config.go
@@ -0,0 +1,70 @@
+package coordinate
+
+// Config is used to set the parameters of the Vivaldi-based coordinate mapping
+// algorithm.
+//
+// The following references are called out at various points in the documentation
+// here:
+//
+// [1] Dabek, Frank, et al. "Vivaldi: A decentralized network coordinate system."
+// ACM SIGCOMM Computer Communication Review. Vol. 34. No. 4. ACM, 2004.
+// [2] Ledlie, Jonathan, Paul Gardner, and Margo I. Seltzer. "Network Coordinates
+// in the Wild." NSDI. Vol. 7. 2007.
+// [3] Lee, Sanghwan, et al. "On suitability of Euclidean embedding for
+// host-based network coordinate systems." Networking, IEEE/ACM Transactions
+// on 18.1 (2010): 27-40.
+type Config struct {
+ // The dimensionality of the coordinate system. As discussed in [2], more
+ // dimensions improves the accuracy of the estimates up to a point. Per [2]
+ // we chose 8 dimensions plus a non-Euclidean height.
+ Dimensionality uint
+
+ // VivaldiErrorMax is the default error value when a node hasn't yet made
+ // any observations. It also serves as an upper limit on the error value in
+ // case observations cause the error value to increase without bound.
+ VivaldiErrorMax float64
+
+ // VivaldiCE is a tuning factor that controls the maximum impact an
+ // observation can have on a node's confidence. See [1] for more details.
+ VivaldiCE float64
+
+ // VivaldiCC is a tuning factor that controls the maximum impact an
+ // observation can have on a node's coordinate. See [1] for more details.
+ VivaldiCC float64
+
+ // AdjustmentWindowSize is a tuning factor that determines how many samples
+ // we retain to calculate the adjustment factor as discussed in [3]. Setting
+ // this to zero disables this feature.
+ AdjustmentWindowSize uint
+
+ // HeightMin is the minimum value of the height parameter. Since this
+ // always must be positive, it will introduce a small amount error, so
+ // the chosen value should be relatively small compared to "normal"
+ // coordinates.
+ HeightMin float64
+
+ // LatencyFilterSize is the maximum number of samples that are retained
+ // per node, in order to compute a median. The intent is to ride out blips
+ // but still keep the delay low, since our time to probe any given node is
+ // pretty infrequent. See [2] for more details.
+ LatencyFilterSize uint
+
+ // GravityRho is a tuning factor that sets how much gravity has an effect
+ // to try to re-center coordinates. See [2] for more details.
+ GravityRho float64
+}
+
+// DefaultConfig returns a Config that has some default values suitable for
+// basic testing of the algorithm, but not tuned to any particular type of cluster.
+func DefaultConfig() *Config {
+ return &Config{
+ Dimensionality: 8,
+ VivaldiErrorMax: 1.5,
+ VivaldiCE: 0.25,
+ VivaldiCC: 0.25,
+ AdjustmentWindowSize: 20,
+ HeightMin: 10.0e-6,
+ LatencyFilterSize: 3,
+ GravityRho: 150.0,
+ }
+}
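
DefaultConfig is a starting point rather than a tuned profile; a sketch of overriding the documented knobs before building a client (the values shown are illustrative, not recommendations):

```go
package main

import "github.com/hashicorp/serf/coordinate"

func main() {
	config := coordinate.DefaultConfig()
	config.Dimensionality = 4       // fewer dimensions, cheaper per-update math
	config.AdjustmentWindowSize = 0 // zero disables the adjustment term from [3]
	config.LatencyFilterSize = 5    // median over the last five RTTs per node

	// NewClient validates the config; Dimensionality must be > 0.
	if _, err := coordinate.NewClient(config); err != nil {
		panic(err)
	}
}
```
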
diff --git a/vendor/github.com/hashicorp/serf/coordinate/coordinate.go b/vendor/github.com/hashicorp/serf/coordinate/coordinate.go
new file mode 100644
index 0000000..fbe792c
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/coordinate/coordinate.go
@@ -0,0 +1,203 @@
+package coordinate
+
+import (
+ "math"
+ "math/rand"
+ "time"
+)
+
+// Coordinate is a specialized structure for holding network coordinates for the
+// Vivaldi-based coordinate mapping algorithm. All of the fields should be public
+// to enable this to be serialized. All values in here are in units of seconds.
+type Coordinate struct {
+ // Vec is the Euclidean portion of the coordinate. This is used along
+ // with the other fields to provide an overall distance estimate. The
+ // units here are seconds.
+ Vec []float64
+
+ // Error reflects the confidence in the given coordinate and is updated
+ // dynamically by the Vivaldi Client. This is dimensionless.
+ Error float64
+
+ // Adjustment is a distance offset computed based on a calculation over
+ // observations from all other nodes over a fixed window and is updated
+ // dynamically by the Vivaldi Client. The units here are seconds.
+ Adjustment float64
+
+ // Height is a distance offset that accounts for non-Euclidean effects
+ // which model the access links from nodes to the core Internet. The access
+ // links are usually set by bandwidth and congestion, and the core links
+ // usually follow distance based on geography.
+ Height float64
+}
+
+const (
+ // secondsToNanoseconds is used to convert float seconds to nanoseconds.
+ secondsToNanoseconds = 1.0e9
+
+ // zeroThreshold is used to decide if two coordinates are on top of each
+ // other.
+ zeroThreshold = 1.0e-6
+)
+
+// DimensionalityConflictError is panicked if you try to perform operations
+// with incompatible dimensions.
+type DimensionalityConflictError struct{}
+
+// Error implements the error interface.
+func (e DimensionalityConflictError) Error() string {
+ return "coordinate dimensionality does not match"
+}
+
+// NewCoordinate creates a new coordinate at the origin, using the given config
+// to supply key initial values.
+func NewCoordinate(config *Config) *Coordinate {
+ return &Coordinate{
+ Vec: make([]float64, config.Dimensionality),
+ Error: config.VivaldiErrorMax,
+ Adjustment: 0.0,
+ Height: config.HeightMin,
+ }
+}
+
+// Clone creates an independent copy of this coordinate.
+func (c *Coordinate) Clone() *Coordinate {
+ vec := make([]float64, len(c.Vec))
+ copy(vec, c.Vec)
+ return &Coordinate{
+ Vec: vec,
+ Error: c.Error,
+ Adjustment: c.Adjustment,
+ Height: c.Height,
+ }
+}
+
+// componentIsValid returns false if a floating point value is a NaN or an
+// infinity.
+func componentIsValid(f float64) bool {
+ return !math.IsInf(f, 0) && !math.IsNaN(f)
+}
+
+// IsValid returns false if any component of a coordinate isn't valid, per the
+// componentIsValid() helper above.
+func (c *Coordinate) IsValid() bool {
+ for i := range c.Vec {
+ if !componentIsValid(c.Vec[i]) {
+ return false
+ }
+ }
+
+ return componentIsValid(c.Error) &&
+ componentIsValid(c.Adjustment) &&
+ componentIsValid(c.Height)
+}
+
+// IsCompatibleWith checks to see if the two coordinates are compatible
+// dimensionally. If this returns true then you are guaranteed to not get
+// any runtime errors operating on them.
+func (c *Coordinate) IsCompatibleWith(other *Coordinate) bool {
+ return len(c.Vec) == len(other.Vec)
+}
+
+// ApplyForce returns the result of applying the force from the direction of the
+// other coordinate.
+func (c *Coordinate) ApplyForce(config *Config, force float64, other *Coordinate) *Coordinate {
+ if !c.IsCompatibleWith(other) {
+ panic(DimensionalityConflictError{})
+ }
+
+ ret := c.Clone()
+ unit, mag := unitVectorAt(c.Vec, other.Vec)
+ ret.Vec = add(ret.Vec, mul(unit, force))
+ if mag > zeroThreshold {
+ ret.Height = (ret.Height+other.Height)*force/mag + ret.Height
+ ret.Height = math.Max(ret.Height, config.HeightMin)
+ }
+ return ret
+}
+
+// DistanceTo returns the distance between this coordinate and the other
+// coordinate, including adjustments.
+func (c *Coordinate) DistanceTo(other *Coordinate) time.Duration {
+ if !c.IsCompatibleWith(other) {
+ panic(DimensionalityConflictError{})
+ }
+
+ dist := c.rawDistanceTo(other)
+ adjustedDist := dist + c.Adjustment + other.Adjustment
+ if adjustedDist > 0.0 {
+ dist = adjustedDist
+ }
+ return time.Duration(dist * secondsToNanoseconds)
+}
+
+// rawDistanceTo returns the Vivaldi distance between this coordinate and the
+// other coordinate in seconds, not including adjustments. This assumes the
+// dimensions have already been checked to be compatible.
+func (c *Coordinate) rawDistanceTo(other *Coordinate) float64 {
+ return magnitude(diff(c.Vec, other.Vec)) + c.Height + other.Height
+}
+
+// add returns the sum of vec1 and vec2. This assumes the dimensions have
+// already been checked to be compatible.
+func add(vec1 []float64, vec2 []float64) []float64 {
+ ret := make([]float64, len(vec1))
+ for i := range ret {
+ ret[i] = vec1[i] + vec2[i]
+ }
+ return ret
+}
+
+// diff returns the difference between the vec1 and vec2. This assumes the
+// dimensions have already been checked to be compatible.
+func diff(vec1 []float64, vec2 []float64) []float64 {
+ ret := make([]float64, len(vec1))
+ for i := range ret {
+ ret[i] = vec1[i] - vec2[i]
+ }
+ return ret
+}
+
+// mul returns vec multiplied by a scalar factor.
+func mul(vec []float64, factor float64) []float64 {
+ ret := make([]float64, len(vec))
+ for i := range vec {
+ ret[i] = vec[i] * factor
+ }
+ return ret
+}
+
+// magnitude computes the magnitude of the vec.
+func magnitude(vec []float64) float64 {
+ sum := 0.0
+ for i := range vec {
+ sum += vec[i] * vec[i]
+ }
+ return math.Sqrt(sum)
+}
+
+// unitVectorAt returns a unit vector pointing at vec1 from vec2. If the two
+// positions are the same then a random unit vector is returned. We also return
+// the distance between the points for use in the later height calculation.
+func unitVectorAt(vec1 []float64, vec2 []float64) ([]float64, float64) {
+ ret := diff(vec1, vec2)
+
+ // If the coordinates aren't on top of each other we can normalize.
+ if mag := magnitude(ret); mag > zeroThreshold {
+ return mul(ret, 1.0/mag), mag
+ }
+
+ // Otherwise, just return a random unit vector.
+ for i := range ret {
+ ret[i] = rand.Float64() - 0.5
+ }
+ if mag := magnitude(ret); mag > zeroThreshold {
+ return mul(ret, 1.0/mag), 0.0
+ }
+
+ // And finally just give up and make a unit vector along the first
+ // dimension. This should be exceedingly rare.
+ ret = make([]float64, len(ret))
+ ret[0] = 1.0
+ return ret, 0.0
+}
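
Because Vec, Height, and Adjustment are exported and all expressed in seconds, the distance model above can be checked by hand; a small sketch under the default config:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/serf/coordinate"
)

func main() {
	config := coordinate.DefaultConfig()

	// Two coordinates one second apart along the first axis. DistanceTo
	// is the Euclidean distance plus both heights (plus any adjustments),
	// so with HeightMin of 10µs per node the result is just over 1s.
	a := coordinate.NewCoordinate(config)
	b := coordinate.NewCoordinate(config)
	b.Vec[0] = 1.0

	fmt.Println(a.DistanceTo(b)) // ~1.00002s
}
```
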
diff --git a/vendor/github.com/hashicorp/serf/coordinate/phantom.go b/vendor/github.com/hashicorp/serf/coordinate/phantom.go
new file mode 100644
index 0000000..6fb033c
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/coordinate/phantom.go
@@ -0,0 +1,187 @@
+package coordinate
+
+import (
+ "fmt"
+ "math"
+ "math/rand"
+ "time"
+)
+
+// GenerateClients returns a slice with nodes number of clients, all with the
+// given config.
+func GenerateClients(nodes int, config *Config) ([]*Client, error) {
+ clients := make([]*Client, nodes)
+ for i := range clients {
+ client, err := NewClient(config)
+ if err != nil {
+ return nil, err
+ }
+
+ clients[i] = client
+ }
+ return clients, nil
+}
+
+// GenerateLine returns a truth matrix as if all the nodes are in a straight line
+// with the given spacing between them.
+func GenerateLine(nodes int, spacing time.Duration) [][]time.Duration {
+ truth := make([][]time.Duration, nodes)
+ for i := range truth {
+ truth[i] = make([]time.Duration, nodes)
+ }
+
+ for i := 0; i < nodes; i++ {
+ for j := i + 1; j < nodes; j++ {
+ rtt := time.Duration(j-i) * spacing
+ truth[i][j], truth[j][i] = rtt, rtt
+ }
+ }
+ return truth
+}
+
+// GenerateGrid returns a truth matrix as if all the nodes are in a two dimensional
+// grid with the given spacing between them.
+func GenerateGrid(nodes int, spacing time.Duration) [][]time.Duration {
+ truth := make([][]time.Duration, nodes)
+ for i := range truth {
+ truth[i] = make([]time.Duration, nodes)
+ }
+
+ n := int(math.Sqrt(float64(nodes)))
+ for i := 0; i < nodes; i++ {
+ for j := i + 1; j < nodes; j++ {
+ x1, y1 := float64(i%n), float64(i/n)
+ x2, y2 := float64(j%n), float64(j/n)
+ dx, dy := x2-x1, y2-y1
+ dist := math.Sqrt(dx*dx + dy*dy)
+ rtt := time.Duration(dist * float64(spacing))
+ truth[i][j], truth[j][i] = rtt, rtt
+ }
+ }
+ return truth
+}
+
+// GenerateSplit returns a truth matrix as if half the nodes are close together in
+// one location and half the nodes are close together in another. The lan factor
+// is used to separate the nodes locally and the wan factor represents the split
+// between the two sides.
+func GenerateSplit(nodes int, lan time.Duration, wan time.Duration) [][]time.Duration {
+ truth := make([][]time.Duration, nodes)
+ for i := range truth {
+ truth[i] = make([]time.Duration, nodes)
+ }
+
+ split := nodes / 2
+ for i := 0; i < nodes; i++ {
+ for j := i + 1; j < nodes; j++ {
+ rtt := lan
+ if (i <= split && j > split) || (i > split && j <= split) {
+ rtt += wan
+ }
+ truth[i][j], truth[j][i] = rtt, rtt
+ }
+ }
+ return truth
+}
+
+// GenerateCircle returns a truth matrix for a set of nodes, evenly distributed
+// around a circle with the given radius. The first node is at the "center" of the
+// circle because it's equidistant from all the other nodes, but we place it at
+// double the radius, so it should show up above all the other nodes in height.
+func GenerateCircle(nodes int, radius time.Duration) [][]time.Duration {
+ truth := make([][]time.Duration, nodes)
+ for i := range truth {
+ truth[i] = make([]time.Duration, nodes)
+ }
+
+ for i := 0; i < nodes; i++ {
+ for j := i + 1; j < nodes; j++ {
+ var rtt time.Duration
+ if i == 0 {
+ rtt = 2 * radius
+ } else {
+ t1 := 2.0 * math.Pi * float64(i) / float64(nodes)
+ x1, y1 := math.Cos(t1), math.Sin(t1)
+ t2 := 2.0 * math.Pi * float64(j) / float64(nodes)
+ x2, y2 := math.Cos(t2), math.Sin(t2)
+ dx, dy := x2-x1, y2-y1
+ dist := math.Sqrt(dx*dx + dy*dy)
+ rtt = time.Duration(dist * float64(radius))
+ }
+ truth[i][j], truth[j][i] = rtt, rtt
+ }
+ }
+ return truth
+}
+
+// GenerateRandom returns a truth matrix for a set of nodes with normally
+// distributed delays, with the given mean and deviation. The RNG is re-seeded
+// so you always get the same matrix for a given size.
+func GenerateRandom(nodes int, mean time.Duration, deviation time.Duration) [][]time.Duration {
+ rand.Seed(1)
+
+ truth := make([][]time.Duration, nodes)
+ for i := range truth {
+ truth[i] = make([]time.Duration, nodes)
+ }
+
+ for i := 0; i < nodes; i++ {
+ for j := i + 1; j < nodes; j++ {
+ rttSeconds := rand.NormFloat64()*deviation.Seconds() + mean.Seconds()
+ rtt := time.Duration(rttSeconds * secondsToNanoseconds)
+ truth[i][j], truth[j][i] = rtt, rtt
+ }
+ }
+ return truth
+}
+
+// Simulate runs the given number of cycles using the given list of clients and
+// truth matrix. On each cycle, each client will pick a random node and observe
+// the truth RTT, updating its coordinate estimate. The RNG is re-seeded for
+// each simulation run to get deterministic results (for this algorithm and the
+// underlying algorithm which will use random numbers for position vectors when
+// starting out with everything at the origin).
+func Simulate(clients []*Client, truth [][]time.Duration, cycles int) {
+ rand.Seed(1)
+
+ nodes := len(clients)
+ for cycle := 0; cycle < cycles; cycle++ {
+  for i := range clients {
+ if j := rand.Intn(nodes); j != i {
+ c := clients[j].GetCoordinate()
+ rtt := truth[i][j]
+ node := fmt.Sprintf("node_%d", j)
+ clients[i].Update(node, c, rtt)
+ }
+ }
+ }
+}
+
+// Stats is returned from the Evaluate function with a summary of the algorithm
+// performance.
+type Stats struct {
+ ErrorMax float64
+ ErrorAvg float64
+}
+
+// Evaluate uses the coordinates of the given clients to calculate estimated
+// distances and compares them with the given truth matrix, returning summary
+// stats.
+func Evaluate(clients []*Client, truth [][]time.Duration) (stats Stats) {
+ nodes := len(clients)
+ count := 0
+ for i := 0; i < nodes; i++ {
+ for j := i + 1; j < nodes; j++ {
+ est := clients[i].DistanceTo(clients[j].GetCoordinate()).Seconds()
+ actual := truth[i][j].Seconds()
+ error := math.Abs(est-actual) / actual
+ stats.ErrorMax = math.Max(stats.ErrorMax, error)
+ stats.ErrorAvg += error
+ count += 1
+ }
+ }
+
+ stats.ErrorAvg /= float64(count)
+ fmt.Printf("Error avg=%9.6f max=%9.6f\n", stats.ErrorAvg, stats.ErrorMax)
+ return
+}
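
The generators plus Simulate and Evaluate form a self-contained test harness; a sketch wiring them together (node count, spacing, and cycle count are arbitrary choices):

```go
package main

import (
	"time"

	"github.com/hashicorp/serf/coordinate"
)

func main() {
	config := coordinate.DefaultConfig()

	// Ten nodes in a line, 10ms apart, observed over 1000 cycles.
	clients, err := coordinate.GenerateClients(10, config)
	if err != nil {
		panic(err)
	}
	truth := coordinate.GenerateLine(10, 10*time.Millisecond)

	coordinate.Simulate(clients, truth, 1000)
	coordinate.Evaluate(clients, truth) // prints "Error avg=... max=..."
}
```
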
diff --git a/vendor/github.com/jcmturner/gofork/LICENSE b/vendor/github.com/jcmturner/gofork/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/vendor/github.com/jcmturner/gofork/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/README.md b/vendor/github.com/jcmturner/gofork/encoding/asn1/README.md
new file mode 100644
index 0000000..66a2a8c
--- /dev/null
+++ b/vendor/github.com/jcmturner/gofork/encoding/asn1/README.md
@@ -0,0 +1,5 @@
+This is a temporary repository that will be removed when the issues below are fixed in the core golang code.
+
+## Issues
+* [encoding/asn1: cannot marshal into a GeneralString](https://github.com/golang/go/issues/18832)
+* [encoding/asn1: cannot marshal into slice of strings and pass stringtype parameter tags to members](https://github.com/golang/go/issues/18834)
\ No newline at end of file
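
For context, the tag this fork adds is used like the hypothetical sketch below; it assumes the fork keeps upstream's Marshal/Unmarshal entry points, and the principal type and its field are made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/jcmturner/gofork/encoding/asn1"
)

// principal is a made-up struct; "generalstring" is the tag this fork
// adds so string fields (and slice members) encode as GeneralString.
type principal struct {
	NameString []string `asn1:"generalstring"`
}

func main() {
	der, err := asn1.Marshal(principal{NameString: []string{"krbtgt", "EXAMPLE.COM"}})
	if err != nil {
		panic(err)
	}

	var out principal
	if _, err := asn1.Unmarshal(der, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.NameString) // [krbtgt EXAMPLE.COM]
}
```
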
diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/asn1.go b/vendor/github.com/jcmturner/gofork/encoding/asn1/asn1.go
new file mode 100644
index 0000000..f1bb767
--- /dev/null
+++ b/vendor/github.com/jcmturner/gofork/encoding/asn1/asn1.go
@@ -0,0 +1,1003 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package asn1 implements parsing of DER-encoded ASN.1 data structures,
+// as defined in ITU-T Rec X.690.
+//
+// See also ``A Layman's Guide to a Subset of ASN.1, BER, and DER,''
+// http://luca.ntop.org/Teaching/Appunti/asn1.html.
+package asn1
+
+// ASN.1 is a syntax for specifying abstract objects and BER, DER, PER, XER etc
+// are different encoding formats for those objects. Here, we'll be dealing
+// with DER, the Distinguished Encoding Rules. DER is used in X.509 because
+// it's fast to parse and, unlike BER, has a unique encoding for every object.
+// When calculating hashes over objects, it's important that the resulting
+// bytes be the same at both ends and DER removes this margin of error.
+//
+// ASN.1 is very complex and this package doesn't attempt to implement
+// everything by any means.
+
+import (
+ "errors"
+ "fmt"
+ "math/big"
+ "reflect"
+ "strconv"
+ "time"
+ "unicode/utf8"
+)
+
+// A StructuralError suggests that the ASN.1 data is valid, but the Go type
+// which is receiving it doesn't match.
+type StructuralError struct {
+ Msg string
+}
+
+func (e StructuralError) Error() string { return "asn1: structure error: " + e.Msg }
+
+// A SyntaxError suggests that the ASN.1 data is invalid.
+type SyntaxError struct {
+ Msg string
+}
+
+func (e SyntaxError) Error() string { return "asn1: syntax error: " + e.Msg }
+
+// We start by dealing with each of the primitive types in turn.
+
+// BOOLEAN
+
+func parseBool(bytes []byte) (ret bool, err error) {
+ if len(bytes) != 1 {
+ err = SyntaxError{"invalid boolean"}
+ return
+ }
+
+ // DER demands that "If the encoding represents the boolean value TRUE,
+ // its single contents octet shall have all eight bits set to one."
+ // Thus only 0 and 255 are valid encoded values.
+ switch bytes[0] {
+ case 0:
+ ret = false
+ case 0xff:
+ ret = true
+ default:
+ err = SyntaxError{"invalid boolean"}
+ }
+
+ return
+}
+
+// INTEGER
+
+// checkInteger returns nil if the given bytes are a valid DER-encoded
+// INTEGER and an error otherwise.
+func checkInteger(bytes []byte) error {
+ if len(bytes) == 0 {
+ return StructuralError{"empty integer"}
+ }
+ if len(bytes) == 1 {
+ return nil
+ }
+ if (bytes[0] == 0 && bytes[1]&0x80 == 0) || (bytes[0] == 0xff && bytes[1]&0x80 == 0x80) {
+ return StructuralError{"integer not minimally-encoded"}
+ }
+ return nil
+}
+
+// parseInt64 treats the given bytes as a big-endian, signed integer and
+// returns the result.
+func parseInt64(bytes []byte) (ret int64, err error) {
+ err = checkInteger(bytes)
+ if err != nil {
+ return
+ }
+ if len(bytes) > 8 {
+ // We'll overflow an int64 in this case.
+ err = StructuralError{"integer too large"}
+ return
+ }
+ for bytesRead := 0; bytesRead < len(bytes); bytesRead++ {
+ ret <<= 8
+ ret |= int64(bytes[bytesRead])
+ }
+
+ // Shift up and down in order to sign extend the result.
+ ret <<= 64 - uint8(len(bytes))*8
+ ret >>= 64 - uint8(len(bytes))*8
+ return
+}
+
+// parseInt32 treats the given bytes as a big-endian, signed integer and returns
+// the result.
+func parseInt32(bytes []byte) (int32, error) {
+ if err := checkInteger(bytes); err != nil {
+ return 0, err
+ }
+ ret64, err := parseInt64(bytes)
+ if err != nil {
+ return 0, err
+ }
+ if ret64 != int64(int32(ret64)) {
+ return 0, StructuralError{"integer too large"}
+ }
+ return int32(ret64), nil
+}
+
+var bigOne = big.NewInt(1)
+
+// parseBigInt treats the given bytes as a big-endian, signed integer and returns
+// the result.
+func parseBigInt(bytes []byte) (*big.Int, error) {
+ if err := checkInteger(bytes); err != nil {
+ return nil, err
+ }
+ ret := new(big.Int)
+ if len(bytes) > 0 && bytes[0]&0x80 == 0x80 {
+ // This is a negative number.
+ notBytes := make([]byte, len(bytes))
+ for i := range notBytes {
+ notBytes[i] = ^bytes[i]
+ }
+ ret.SetBytes(notBytes)
+ ret.Add(ret, bigOne)
+ ret.Neg(ret)
+ return ret, nil
+ }
+ ret.SetBytes(bytes)
+ return ret, nil
+}
+
+// BIT STRING
+
+// BitString is the structure to use when you want an ASN.1 BIT STRING type. A
+// bit string is padded up to the nearest byte in memory and the number of
+// valid bits is recorded. Padding bits will be zero.
+type BitString struct {
+ Bytes []byte // bits packed into bytes.
+ BitLength int // length in bits.
+}
+
+// At returns the bit at the given index. If the index is out of range it
+// returns 0.
+func (b BitString) At(i int) int {
+ if i < 0 || i >= b.BitLength {
+ return 0
+ }
+ x := i / 8
+ y := 7 - uint(i%8)
+ return int(b.Bytes[x]>>y) & 1
+}
+
+// RightAlign returns a slice where the padding bits are at the beginning. The
+// slice may share memory with the BitString.
+func (b BitString) RightAlign() []byte {
+ shift := uint(8 - (b.BitLength % 8))
+ if shift == 8 || len(b.Bytes) == 0 {
+ return b.Bytes
+ }
+
+ a := make([]byte, len(b.Bytes))
+ a[0] = b.Bytes[0] >> shift
+ for i := 1; i < len(b.Bytes); i++ {
+ a[i] = b.Bytes[i-1] << (8 - shift)
+ a[i] |= b.Bytes[i] >> shift
+ }
+
+ return a
+}
+
+// parseBitString parses an ASN.1 bit string from the given byte slice and returns it.
+func parseBitString(bytes []byte) (ret BitString, err error) {
+ if len(bytes) == 0 {
+ err = SyntaxError{"zero length BIT STRING"}
+ return
+ }
+ paddingBits := int(bytes[0])
+ if paddingBits > 7 ||
+  len(bytes) == 1 && paddingBits > 0 ||
+  bytes[len(bytes)-1]&((1<<bytes[0])-1) != 0 {
+  err = SyntaxError{"invalid padding bits in BIT STRING"}
+  return
+ }
+ ret.BitLength = (len(bytes)-1)*8 - paddingBits
+ ret.Bytes = bytes[1:]
+ return
+}
+
+// OBJECT IDENTIFIER
+
+// An ObjectIdentifier represents an ASN.1 OBJECT IDENTIFIER.
+type ObjectIdentifier []int
+
+// Equal reports whether oi and other represent the same identifier.
+func (oi ObjectIdentifier) Equal(other ObjectIdentifier) bool {
+ if len(oi) != len(other) {
+  return false
+ }
+ for i := 0; i < len(oi); i++ {
+  if oi[i] != other[i] {
+   return false
+  }
+ }
+
+ return true
+}
+
+// String returns the dotted representation of oi, e.g. "1.2.840.113549".
+func (oi ObjectIdentifier) String() string {
+ var s string
+
+ for i, v := range oi {
+  if i > 0 {
+   s += "."
+  }
+  s += strconv.Itoa(v)
+ }
+
+ return s
+}
+
+// parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and
+// returns it. An object identifier is a sequence of variable length integers
+// that are assigned in a hierarchy.
+func parseObjectIdentifier(bytes []byte) (s []int, err error) {
+ if len(bytes) == 0 {
+ err = SyntaxError{"zero length OBJECT IDENTIFIER"}
+ return
+ }
+
+ // In the worst case, we get two elements from the first byte (which is
+ // encoded differently) and then every varint is a single byte long.
+ s = make([]int, len(bytes)+1)
+
+ // The first varint is 40*value1 + value2:
+ // According to this packing, value1 can take the values 0, 1 and 2 only.
+ // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2,
+ // then there are no restrictions on value2.
+ v, offset, err := parseBase128Int(bytes, 0)
+ if err != nil {
+ return
+ }
+ if v < 80 {
+ s[0] = v / 40
+ s[1] = v % 40
+ } else {
+ s[0] = 2
+ s[1] = v - 80
+ }
+
+ i := 2
+ for ; offset < len(bytes); i++ {
+ v, offset, err = parseBase128Int(bytes, offset)
+ if err != nil {
+ return
+ }
+ s[i] = v
+ }
+ s = s[0:i]
+ return
+}
+
+// ENUMERATED
+
+// An Enumerated is represented as a plain int.
+type Enumerated int
+
+// FLAG
+
+// A Flag accepts any data and is set to true if present.
+type Flag bool
+
+// parseBase128Int parses a base-128 encoded int from the given offset in the
+// given byte slice. It returns the value and the new offset.
+func parseBase128Int(bytes []byte, initOffset int) (ret, offset int, err error) {
+ offset = initOffset
+ for shifted := 0; offset < len(bytes); shifted++ {
+ if shifted == 4 {
+ err = StructuralError{"base 128 integer too large"}
+ return
+ }
+ ret <<= 7
+ b := bytes[offset]
+ ret |= int(b & 0x7f)
+ offset++
+ if b&0x80 == 0 {
+ return
+ }
+ }
+ err = SyntaxError{"truncated base 128 integer"}
+ return
+}
+
+// UTCTime
+
+func parseUTCTime(bytes []byte) (ret time.Time, err error) {
+ s := string(bytes)
+
+ formatStr := "0601021504Z0700"
+ ret, err = time.Parse(formatStr, s)
+ if err != nil {
+ formatStr = "060102150405Z0700"
+ ret, err = time.Parse(formatStr, s)
+ }
+ if err != nil {
+ return
+ }
+
+ if serialized := ret.Format(formatStr); serialized != s {
+ err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized)
+ return
+ }
+
+ if ret.Year() >= 2050 {
+ // UTCTime only encodes times prior to 2050. See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1
+ ret = ret.AddDate(-100, 0, 0)
+ }
+
+ return
+}
+
+// parseGeneralizedTime parses the GeneralizedTime from the given byte slice
+// and returns the resulting time.
+func parseGeneralizedTime(bytes []byte) (ret time.Time, err error) {
+ const formatStr = "20060102150405Z0700"
+ s := string(bytes)
+
+ if ret, err = time.Parse(formatStr, s); err != nil {
+ return
+ }
+
+ if serialized := ret.Format(formatStr); serialized != s {
+ err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized)
+ }
+
+ return
+}
+
+// PrintableString
+
+// parsePrintableString parses an ASN.1 PrintableString from the given byte
+// array and returns it.
+func parsePrintableString(bytes []byte) (ret string, err error) {
+ for _, b := range bytes {
+ if !isPrintable(b) {
+ err = SyntaxError{"PrintableString contains invalid character"}
+ return
+ }
+ }
+ ret = string(bytes)
+ return
+}
+
+// isPrintable reports whether the given b is in the ASN.1 PrintableString set.
+func isPrintable(b byte) bool {
+ return 'a' <= b && b <= 'z' ||
+ 'A' <= b && b <= 'Z' ||
+ '0' <= b && b <= '9' ||
+ '\'' <= b && b <= ')' ||
+ '+' <= b && b <= '/' ||
+ b == ' ' ||
+ b == ':' ||
+ b == '=' ||
+ b == '?' ||
+ // This is technically not allowed in a PrintableString.
+ // However, x509 certificates with wildcard strings don't
+ // always use the correct string type so we permit it.
+ b == '*'
+}
+
+// IA5String
+
+// parseIA5String parses an ASN.1 IA5String (ASCII string) from the given
+// byte slice and returns it.
+func parseIA5String(bytes []byte) (ret string, err error) {
+ for _, b := range bytes {
+ if b >= utf8.RuneSelf {
+ err = SyntaxError{"IA5String contains invalid character"}
+ return
+ }
+ }
+ ret = string(bytes)
+ return
+}
+
+// T61String
+
+// parseT61String parses an ASN.1 T61String (8-bit clean string) from the given
+// byte slice and returns it.
+func parseT61String(bytes []byte) (ret string, err error) {
+ return string(bytes), nil
+}
+
+// UTF8String
+
+// parseUTF8String parses an ASN.1 UTF8String (raw UTF-8) from the given byte
+// array and returns it.
+func parseUTF8String(bytes []byte) (ret string, err error) {
+ if !utf8.Valid(bytes) {
+ return "", errors.New("asn1: invalid UTF-8 string")
+ }
+ return string(bytes), nil
+}
+
+// A RawValue represents an undecoded ASN.1 object.
+type RawValue struct {
+ Class, Tag int
+ IsCompound bool
+ Bytes []byte
+ FullBytes []byte // includes the tag and length
+}
+
+// RawContent is used to signal that the undecoded DER data needs to be
+// preserved for a struct. To use it, the first field of the struct must have
+// this type. It's an error for any of the other fields to have this type.
+type RawContent []byte
+
+// Tagging
+
+// parseTagAndLength parses an ASN.1 tag and length pair from the given offset
+// into a byte slice. It returns the parsed data and the new offset. SET and
+// SET OF (tag 17) are mapped to SEQUENCE and SEQUENCE OF (tag 16) since we
+// don't distinguish between ordered and unordered objects in this code.
+func parseTagAndLength(bytes []byte, initOffset int) (ret tagAndLength, offset int, err error) {
+ offset = initOffset
+ // parseTagAndLength should not be called without at least a single
+ // byte to read. Thus this check is for robustness:
+ if offset >= len(bytes) {
+ err = errors.New("asn1: internal error in parseTagAndLength")
+ return
+ }
+ b := bytes[offset]
+ offset++
+ ret.class = int(b >> 6)
+ ret.isCompound = b&0x20 == 0x20
+ ret.tag = int(b & 0x1f)
+
+ // If the bottom five bits are set, then the tag number is actually base 128
+ // encoded afterwards
+ if ret.tag == 0x1f {
+ ret.tag, offset, err = parseBase128Int(bytes, offset)
+ if err != nil {
+ return
+ }
+ // Tags should be encoded in minimal form.
+ if ret.tag < 0x1f {
+ err = SyntaxError{"non-minimal tag"}
+ return
+ }
+ }
+ if offset >= len(bytes) {
+ err = SyntaxError{"truncated tag or length"}
+ return
+ }
+ b = bytes[offset]
+ offset++
+ if b&0x80 == 0 {
+ // The length is encoded in the bottom 7 bits.
+ ret.length = int(b & 0x7f)
+ } else {
+ // Bottom 7 bits give the number of length bytes to follow.
+ numBytes := int(b & 0x7f)
+ if numBytes == 0 {
+ err = SyntaxError{"indefinite length found (not DER)"}
+ return
+ }
+ ret.length = 0
+ for i := 0; i < numBytes; i++ {
+ if offset >= len(bytes) {
+ err = SyntaxError{"truncated tag or length"}
+ return
+ }
+ b = bytes[offset]
+ offset++
+ if ret.length >= 1<<23 {
+ // We can't shift ret.length up without
+ // overflowing.
+ err = StructuralError{"length too large"}
+ return
+ }
+ ret.length <<= 8
+ ret.length |= int(b)
+ if ret.length == 0 {
+ // DER requires that lengths be minimal.
+ err = StructuralError{"superfluous leading zeros in length"}
+ return
+ }
+ }
+ // Short lengths must be encoded in short form.
+ if ret.length < 0x80 {
+ err = StructuralError{"non-minimal length"}
+ return
+ }
+ }
+
+ return
+}
+
+// parseSequenceOf is used for SEQUENCE OF and SET OF values. It tries to parse
+// a number of ASN.1 values from the given byte slice and returns them as a
+// slice of Go values of the given type.
+func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type) (ret reflect.Value, err error) {
+ expectedTag, compoundType, ok := getUniversalType(elemType)
+ if !ok {
+ err = StructuralError{"unknown Go type for slice"}
+ return
+ }
+
+ // First we iterate over the input and count the number of elements,
+ // checking that the types are correct in each case.
+ numElements := 0
+ for offset := 0; offset < len(bytes); {
+ var t tagAndLength
+ t, offset, err = parseTagAndLength(bytes, offset)
+ if err != nil {
+ return
+ }
+ switch t.tag {
+ case TagIA5String, TagGeneralString, TagT61String, TagUTF8String:
+ // We pretend that various other string types are
+ // PRINTABLE STRINGs so that a sequence of them can be
+ // parsed into a []string.
+ t.tag = TagPrintableString
+ case TagGeneralizedTime, TagUTCTime:
+ // Likewise, both time types are treated the same.
+ t.tag = TagUTCTime
+ }
+
+ if t.class != ClassUniversal || t.isCompound != compoundType || t.tag != expectedTag {
+ err = StructuralError{"sequence tag mismatch"}
+ return
+ }
+ if invalidLength(offset, t.length, len(bytes)) {
+ err = SyntaxError{"truncated sequence"}
+ return
+ }
+ offset += t.length
+ numElements++
+ }
+ ret = reflect.MakeSlice(sliceType, numElements, numElements)
+ params := fieldParameters{}
+ offset := 0
+ for i := 0; i < numElements; i++ {
+ offset, err = parseField(ret.Index(i), bytes, offset, params)
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+var (
+ bitStringType = reflect.TypeOf(BitString{})
+ objectIdentifierType = reflect.TypeOf(ObjectIdentifier{})
+ enumeratedType = reflect.TypeOf(Enumerated(0))
+ flagType = reflect.TypeOf(Flag(false))
+ timeType = reflect.TypeOf(time.Time{})
+ rawValueType = reflect.TypeOf(RawValue{})
+ rawContentsType = reflect.TypeOf(RawContent(nil))
+ bigIntType = reflect.TypeOf(new(big.Int))
+)
+
+// invalidLength returns true iff offset + length > sliceLength, or if the
+// addition would overflow.
+func invalidLength(offset, length, sliceLength int) bool {
+ return offset+length < offset || offset+length > sliceLength
+}
+
+// parseField is the main parsing function. Given a byte slice and an offset
+// into the array, it will try to parse a suitable ASN.1 value out and store it
+// in the given Value.
+func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParameters) (offset int, err error) {
+ offset = initOffset
+ fieldType := v.Type()
+
+ // If we have run out of data, it may be that there are optional elements at the end.
+ if offset == len(bytes) {
+ if !setDefaultValue(v, params) {
+ err = SyntaxError{"sequence truncated"}
+ }
+ return
+ }
+
+ // Deal with raw values.
+ if fieldType == rawValueType {
+ var t tagAndLength
+ t, offset, err = parseTagAndLength(bytes, offset)
+ if err != nil {
+ return
+ }
+ if invalidLength(offset, t.length, len(bytes)) {
+ err = SyntaxError{"data truncated"}
+ return
+ }
+ result := RawValue{t.class, t.tag, t.isCompound, bytes[offset : offset+t.length], bytes[initOffset : offset+t.length]}
+ offset += t.length
+ v.Set(reflect.ValueOf(result))
+ return
+ }
+
+ // Deal with the ANY type.
+ if ifaceType := fieldType; ifaceType.Kind() == reflect.Interface && ifaceType.NumMethod() == 0 {
+ var t tagAndLength
+ t, offset, err = parseTagAndLength(bytes, offset)
+ if err != nil {
+ return
+ }
+ if invalidLength(offset, t.length, len(bytes)) {
+ err = SyntaxError{"data truncated"}
+ return
+ }
+ var result interface{}
+ if !t.isCompound && t.class == ClassUniversal {
+ innerBytes := bytes[offset : offset+t.length]
+ switch t.tag {
+ case TagPrintableString:
+ result, err = parsePrintableString(innerBytes)
+ case TagIA5String:
+ result, err = parseIA5String(innerBytes)
+ // jtasn1 addition of following case
+ case TagGeneralString:
+ result, err = parseIA5String(innerBytes)
+ case TagT61String:
+ result, err = parseT61String(innerBytes)
+ case TagUTF8String:
+ result, err = parseUTF8String(innerBytes)
+ case TagInteger:
+ result, err = parseInt64(innerBytes)
+ case TagBitString:
+ result, err = parseBitString(innerBytes)
+ case TagOID:
+ result, err = parseObjectIdentifier(innerBytes)
+ case TagUTCTime:
+ result, err = parseUTCTime(innerBytes)
+ case TagGeneralizedTime:
+ result, err = parseGeneralizedTime(innerBytes)
+ case TagOctetString:
+ result = innerBytes
+ default:
+ // If we don't know how to handle the type, we just leave Value as nil.
+ }
+ }
+ offset += t.length
+ if err != nil {
+ return
+ }
+ if result != nil {
+ v.Set(reflect.ValueOf(result))
+ }
+ return
+ }
+ universalTag, compoundType, ok1 := getUniversalType(fieldType)
+ if !ok1 {
+ err = StructuralError{fmt.Sprintf("unknown Go type: %v", fieldType)}
+ return
+ }
+
+ t, offset, err := parseTagAndLength(bytes, offset)
+ if err != nil {
+ return
+ }
+ if params.explicit {
+ expectedClass := ClassContextSpecific
+ if params.application {
+ expectedClass = ClassApplication
+ }
+ if offset == len(bytes) {
+ err = StructuralError{"explicit tag has no child"}
+ return
+ }
+ if t.class == expectedClass && t.tag == *params.tag && (t.length == 0 || t.isCompound) {
+ if t.length > 0 {
+ t, offset, err = parseTagAndLength(bytes, offset)
+ if err != nil {
+ return
+ }
+ } else {
+ if fieldType != flagType {
+ err = StructuralError{"zero length explicit tag was not an asn1.Flag"}
+ return
+ }
+ v.SetBool(true)
+ return
+ }
+ } else {
+ // The tags didn't match, it might be an optional element.
+ ok := setDefaultValue(v, params)
+ if ok {
+ offset = initOffset
+ } else {
+ err = StructuralError{"explicitly tagged member didn't match"}
+ }
+ return
+ }
+ }
+
+ // Special case for strings: all the ASN.1 string types map to the Go
+ // type string. getUniversalType returns the tag for PrintableString
+ // when it sees a string, so if we see a different string type on the
+ // wire, we change the universal type to match.
+ if universalTag == TagPrintableString {
+ if t.class == ClassUniversal {
+ switch t.tag {
+ case TagIA5String, TagGeneralString, TagT61String, TagUTF8String:
+ universalTag = t.tag
+ }
+ } else if params.stringType != 0 {
+ universalTag = params.stringType
+ }
+ }
+
+ // Special case for time: UTCTime and GeneralizedTime both map to the
+ // Go type time.Time.
+ if universalTag == TagUTCTime && t.tag == TagGeneralizedTime && t.class == ClassUniversal {
+ universalTag = TagGeneralizedTime
+ }
+
+ if params.set {
+ universalTag = TagSet
+ }
+
+ expectedClass := ClassUniversal
+ expectedTag := universalTag
+
+ if !params.explicit && params.tag != nil {
+ expectedClass = ClassContextSpecific
+ expectedTag = *params.tag
+ }
+
+ if !params.explicit && params.application && params.tag != nil {
+ expectedClass = ClassApplication
+ expectedTag = *params.tag
+ }
+
+ // We have unwrapped any explicit tagging at this point.
+ if t.class != expectedClass || t.tag != expectedTag || t.isCompound != compoundType {
+ // Tags don't match. Again, it could be an optional element.
+ ok := setDefaultValue(v, params)
+ if ok {
+ offset = initOffset
+ } else {
+ err = StructuralError{fmt.Sprintf("tags don't match (%d vs %+v) %+v %s @%d", expectedTag, t, params, fieldType.Name(), offset)}
+ }
+ return
+ }
+ if invalidLength(offset, t.length, len(bytes)) {
+ err = SyntaxError{"data truncated"}
+ return
+ }
+ innerBytes := bytes[offset : offset+t.length]
+ offset += t.length
+
+ // We deal with the structures defined in this package first.
+ switch fieldType {
+ case objectIdentifierType:
+ newSlice, err1 := parseObjectIdentifier(innerBytes)
+ v.Set(reflect.MakeSlice(v.Type(), len(newSlice), len(newSlice)))
+ if err1 == nil {
+ reflect.Copy(v, reflect.ValueOf(newSlice))
+ }
+ err = err1
+ return
+ case bitStringType:
+ bs, err1 := parseBitString(innerBytes)
+ if err1 == nil {
+ v.Set(reflect.ValueOf(bs))
+ }
+ err = err1
+ return
+ case timeType:
+ var time time.Time
+ var err1 error
+ if universalTag == TagUTCTime {
+ time, err1 = parseUTCTime(innerBytes)
+ } else {
+ time, err1 = parseGeneralizedTime(innerBytes)
+ }
+ if err1 == nil {
+ v.Set(reflect.ValueOf(time))
+ }
+ err = err1
+ return
+ case enumeratedType:
+ parsedInt, err1 := parseInt32(innerBytes)
+ if err1 == nil {
+ v.SetInt(int64(parsedInt))
+ }
+ err = err1
+ return
+ case flagType:
+ v.SetBool(true)
+ return
+ case bigIntType:
+ parsedInt, err1 := parseBigInt(innerBytes)
+ if err1 == nil {
+ v.Set(reflect.ValueOf(parsedInt))
+ }
+ err = err1
+ return
+ }
+ switch val := v; val.Kind() {
+ case reflect.Bool:
+ parsedBool, err1 := parseBool(innerBytes)
+ if err1 == nil {
+ val.SetBool(parsedBool)
+ }
+ err = err1
+ return
+ case reflect.Int, reflect.Int32, reflect.Int64:
+ if val.Type().Size() == 4 {
+ parsedInt, err1 := parseInt32(innerBytes)
+ if err1 == nil {
+ val.SetInt(int64(parsedInt))
+ }
+ err = err1
+ } else {
+ parsedInt, err1 := parseInt64(innerBytes)
+ if err1 == nil {
+ val.SetInt(parsedInt)
+ }
+ err = err1
+ }
+ return
+ // TODO(dfc) Add support for the remaining integer types
+ case reflect.Struct:
+ structType := fieldType
+
+ if structType.NumField() > 0 &&
+ structType.Field(0).Type == rawContentsType {
+ bytes := bytes[initOffset:offset]
+ val.Field(0).Set(reflect.ValueOf(RawContent(bytes)))
+ }
+
+ innerOffset := 0
+ for i := 0; i < structType.NumField(); i++ {
+ field := structType.Field(i)
+ if i == 0 && field.Type == rawContentsType {
+ continue
+ }
+ innerOffset, err = parseField(val.Field(i), innerBytes, innerOffset, parseFieldParameters(field.Tag.Get("asn1")))
+ if err != nil {
+ return
+ }
+ }
+ // We allow extra bytes at the end of the SEQUENCE because
+ // adding elements to the end has been used in X.509 as the
+ // version numbers have increased.
+ return
+ case reflect.Slice:
+ sliceType := fieldType
+ if sliceType.Elem().Kind() == reflect.Uint8 {
+ val.Set(reflect.MakeSlice(sliceType, len(innerBytes), len(innerBytes)))
+ reflect.Copy(val, reflect.ValueOf(innerBytes))
+ return
+ }
+ newSlice, err1 := parseSequenceOf(innerBytes, sliceType, sliceType.Elem())
+ if err1 == nil {
+ val.Set(newSlice)
+ }
+ err = err1
+ return
+ case reflect.String:
+ var v string
+ switch universalTag {
+ case TagPrintableString:
+ v, err = parsePrintableString(innerBytes)
+ case TagIA5String:
+ v, err = parseIA5String(innerBytes)
+ case TagT61String:
+ v, err = parseT61String(innerBytes)
+ case TagUTF8String:
+ v, err = parseUTF8String(innerBytes)
+ case TagGeneralString:
+ // GeneralString is specified in ISO-2022/ECMA-35,
+ // A brief review suggests that it includes structures
+ // that allow the encoding to change midstring and
+ // such. We give up and pass it as an 8-bit string.
+ v, err = parseT61String(innerBytes)
+ default:
+ err = SyntaxError{fmt.Sprintf("internal error: unknown string type %d", universalTag)}
+ }
+ if err == nil {
+ val.SetString(v)
+ }
+ return
+ }
+ err = StructuralError{"unsupported: " + v.Type().String()}
+ return
+}
+
+// canHaveDefaultValue reports whether k is a Kind that we will set a default
+// value for. (A signed integer, essentially.)
+func canHaveDefaultValue(k reflect.Kind) bool {
+ switch k {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return true
+ }
+
+ return false
+}
+
+// setDefaultValue is used to install a default value, from a tag string, into
+// a Value. It is successful if the field was optional, even if a default value
+// wasn't provided or it failed to install it into the Value.
+func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) {
+ if !params.optional {
+ return
+ }
+ ok = true
+ if params.defaultValue == nil {
+ return
+ }
+ if canHaveDefaultValue(v.Kind()) {
+ v.SetInt(*params.defaultValue)
+ }
+ return
+}
+
+// Unmarshal parses the DER-encoded ASN.1 data structure b
+// and uses the reflect package to fill in an arbitrary value pointed at by val.
+// Because Unmarshal uses the reflect package, the structs
+// being written to must use upper case field names.
+//
+// An ASN.1 INTEGER can be written to an int, int32, int64,
+// or *big.Int (from the math/big package).
+// If the encoded value does not fit in the Go type,
+// Unmarshal returns a parse error.
+//
+// An ASN.1 BIT STRING can be written to a BitString.
+//
+// An ASN.1 OCTET STRING can be written to a []byte.
+//
+// An ASN.1 OBJECT IDENTIFIER can be written to an
+// ObjectIdentifier.
+//
+// An ASN.1 ENUMERATED can be written to an Enumerated.
+//
+// An ASN.1 UTCTIME or GENERALIZEDTIME can be written to a time.Time.
+//
+// An ASN.1 PrintableString or IA5String can be written to a string.
+//
+// Any of the above ASN.1 values can be written to an interface{}.
+// The value stored in the interface has the corresponding Go type.
+// For integers, that type is int64.
+//
+// An ASN.1 SEQUENCE OF x or SET OF x can be written
+// to a slice if an x can be written to the slice's element type.
+//
+// An ASN.1 SEQUENCE or SET can be written to a struct
+// if each of the elements in the sequence can be
+// written to the corresponding element in the struct.
+//
+// The following tags on struct fields have special meaning to Unmarshal:
+//
+// application specifies that a APPLICATION tag is used
+// default:x sets the default value for optional integer fields
+// explicit specifies that an additional, explicit tag wraps the implicit one
+// optional marks the field as ASN.1 OPTIONAL
+// set causes a SET, rather than a SEQUENCE type to be expected
+// tag:x specifies the ASN.1 tag number; implies ASN.1 CONTEXT SPECIFIC
+//
+// If the type of the first field of a structure is RawContent then the raw
+// ASN1 contents of the struct will be stored in it.
+//
+// If the type name of a slice element ends with "SET" then it's treated as if
+// the "set" tag was set on it. This can be used with nested slices where a
+// struct tag cannot be given.
+//
+// Other ASN.1 types are not supported; if it encounters them,
+// Unmarshal returns a parse error.
+func Unmarshal(b []byte, val interface{}) (rest []byte, err error) {
+ return UnmarshalWithParams(b, val, "")
+}
+
+// UnmarshalWithParams allows field parameters to be specified for the
+// top-level element. The form of the params is the same as the field tags.
+func UnmarshalWithParams(b []byte, val interface{}, params string) (rest []byte, err error) {
+ v := reflect.ValueOf(val).Elem()
+ offset, err := parseField(v, b, 0, parseFieldParameters(params))
+ if err != nil {
+ return nil, err
+ }
+ return b[offset:], nil
+}
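
As a concrete instance of the mapping rules documented on Unmarshal, the sketch below decodes a hand-built DER SEQUENCE into a struct; the message type is illustrative only:

```go
package main

import (
	"fmt"

	"github.com/jcmturner/gofork/encoding/asn1"
)

// message mirrors SEQUENCE { INTEGER, PrintableString }.
type message struct {
	Version int
	Name    string
}

func main() {
	// 0x30 0x07: SEQUENCE with 7 content bytes;
	// 0x02 0x01 0x05: INTEGER 5;
	// 0x13 0x02 'h' 'i': PrintableString "hi".
	der := []byte{0x30, 0x07, 0x02, 0x01, 0x05, 0x13, 0x02, 'h', 'i'}

	var m message
	rest, err := asn1.Unmarshal(der, &m)
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Version, m.Name, len(rest)) // 5 hi 0
}
```
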
diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/common.go b/vendor/github.com/jcmturner/gofork/encoding/asn1/common.go
new file mode 100644
index 0000000..7a9da49
--- /dev/null
+++ b/vendor/github.com/jcmturner/gofork/encoding/asn1/common.go
@@ -0,0 +1,173 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package asn1
+
+import (
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// ASN.1 objects have metadata preceding them:
+// the tag: the type of the object
+// a flag denoting if this object is compound or not
+// the class type: the namespace of the tag
+// the length of the object, in bytes
+
+// Here are some standard tags and classes
+
+// ASN.1 tags represent the type of the following object.
+const (
+ TagBoolean = 1
+ TagInteger = 2
+ TagBitString = 3
+ TagOctetString = 4
+ TagOID = 6
+ TagEnum = 10
+ TagUTF8String = 12
+ TagSequence = 16
+ TagSet = 17
+ TagPrintableString = 19
+ TagT61String = 20
+ TagIA5String = 22
+ TagUTCTime = 23
+ TagGeneralizedTime = 24
+ TagGeneralString = 27
+)
+
+// ASN.1 class types represent the namespace of the tag.
+const (
+ ClassUniversal = 0
+ ClassApplication = 1
+ ClassContextSpecific = 2
+ ClassPrivate = 3
+)
+
+type tagAndLength struct {
+ class, tag, length int
+ isCompound bool
+}
+
+// ASN.1 has IMPLICIT and EXPLICIT tags, which can be translated as "instead
+// of" and "in addition to". When not specified, every primitive type has a
+// default tag in the UNIVERSAL class.
+//
+// For example: a BIT STRING is tagged [UNIVERSAL 3] by default (although ASN.1
+// doesn't actually have a UNIVERSAL keyword). However, by saying [IMPLICIT
+// CONTEXT-SPECIFIC 42], that means that the tag is replaced by another.
+//
+// On the other hand, if it said [EXPLICIT CONTEXT-SPECIFIC 10], then an
+// /additional/ tag would wrap the default tag. This explicit tag will have the
+// compound flag set.
+//
+// (This is used in order to remove ambiguity with optional elements.)
+//
+// You can layer EXPLICIT and IMPLICIT tags to an arbitrary depth, however we
+// don't support that here. We support a single layer of EXPLICIT or IMPLICIT
+// tagging with tag strings on the fields of a structure.
+
+// fieldParameters is the parsed representation of tag string from a structure field.
+type fieldParameters struct {
+ optional bool // true iff the field is OPTIONAL
+ explicit bool // true iff an EXPLICIT tag is in use.
+ application bool // true iff an APPLICATION tag is in use.
+ defaultValue *int64 // a default value for INTEGER typed fields (maybe nil).
+ tag *int // the EXPLICIT or IMPLICIT tag (maybe nil).
+ stringType int // the string tag to use when marshaling.
+ timeType int // the time tag to use when marshaling.
+ set bool // true iff this should be encoded as a SET
+ omitEmpty bool // true iff this should be omitted if empty when marshaling.
+
+ // Invariants:
+ // if explicit is set, tag is non-nil.
+}
+
+// Given a tag string with the format specified in the package comment,
+// parseFieldParameters will parse it into a fieldParameters structure,
+// ignoring unknown parts of the string.
+func parseFieldParameters(str string) (ret fieldParameters) {
+ for _, part := range strings.Split(str, ",") {
+ switch {
+ case part == "optional":
+ ret.optional = true
+ case part == "explicit":
+ ret.explicit = true
+ if ret.tag == nil {
+ ret.tag = new(int)
+ }
+ case part == "generalized":
+ ret.timeType = TagGeneralizedTime
+ case part == "utc":
+ ret.timeType = TagUTCTime
+ case part == "ia5":
+ ret.stringType = TagIA5String
+ // jtasn1 case below added
+ case part == "generalstring":
+ ret.stringType = TagGeneralString
+ case part == "printable":
+ ret.stringType = TagPrintableString
+ case part == "utf8":
+ ret.stringType = TagUTF8String
+ case strings.HasPrefix(part, "default:"):
+ i, err := strconv.ParseInt(part[8:], 10, 64)
+ if err == nil {
+ ret.defaultValue = new(int64)
+ *ret.defaultValue = i
+ }
+ case strings.HasPrefix(part, "tag:"):
+ i, err := strconv.Atoi(part[4:])
+ if err == nil {
+ ret.tag = new(int)
+ *ret.tag = i
+ }
+ case part == "set":
+ ret.set = true
+ case part == "application":
+ ret.application = true
+ if ret.tag == nil {
+ ret.tag = new(int)
+ }
+ case part == "omitempty":
+ ret.omitEmpty = true
+ }
+ }
+ return
+}
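+
+// For illustration (an assumed tag string, not one used by this package):
+// parseFieldParameters("optional,explicit,tag:0,default:1") yields
+// optional=true, explicit=true, tag pointing at 0 and defaultValue pointing
+// at 1; unrecognized parts of the string are silently ignored.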
+
+// Given a reflected Go type, getUniversalType returns the default tag number
+// and expected compound flag.
+func getUniversalType(t reflect.Type) (tagNumber int, isCompound, ok bool) {
+ switch t {
+ case objectIdentifierType:
+ return TagOID, false, true
+ case bitStringType:
+ return TagBitString, false, true
+ case timeType:
+ return TagUTCTime, false, true
+ case enumeratedType:
+ return TagEnum, false, true
+ case bigIntType:
+ return TagInteger, false, true
+ }
+ switch t.Kind() {
+ case reflect.Bool:
+ return TagBoolean, false, true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return TagInteger, false, true
+ case reflect.Struct:
+ return TagSequence, true, true
+ case reflect.Slice:
+ if t.Elem().Kind() == reflect.Uint8 {
+ return TagOctetString, false, true
+ }
+ if strings.HasSuffix(t.Name(), "SET") {
+ return TagSet, true, true
+ }
+ return TagSequence, true, true
+ case reflect.String:
+ return TagPrintableString, false, true
+ }
+ return 0, false, false
+}
diff --git a/vendor/github.com/jcmturner/gofork/encoding/asn1/marshal.go b/vendor/github.com/jcmturner/gofork/encoding/asn1/marshal.go
new file mode 100644
index 0000000..f52eee9
--- /dev/null
+++ b/vendor/github.com/jcmturner/gofork/encoding/asn1/marshal.go
@@ -0,0 +1,659 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package asn1
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "math/big"
+ "reflect"
+ "time"
+ "unicode/utf8"
+)
+
+// A forkableWriter is an in-memory buffer that can be
+// 'forked' to create new forkableWriters that bracket the
+// original. After
+// pre, post := w.fork()
+// the overall sequence of bytes represented is logically w+pre+post.
+type forkableWriter struct {
+ *bytes.Buffer
+ pre, post *forkableWriter
+}
+
+func newForkableWriter() *forkableWriter {
+ return &forkableWriter{new(bytes.Buffer), nil, nil}
+}
+
+func (f *forkableWriter) fork() (pre, post *forkableWriter) {
+ if f.pre != nil || f.post != nil {
+ panic("have already forked")
+ }
+ f.pre = newForkableWriter()
+ f.post = newForkableWriter()
+ return f.pre, f.post
+}
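+
+// For example (illustrative only): after
+//
+//	pre, post := w.fork()
+//	pre.WriteByte(0x30)
+//	post.WriteByte(0x00)
+//
+// w.writeTo emits w's own buffered bytes first, then 0x30, then 0x00, i.e.
+// the logical sequence w+pre+post described above.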
+
+func (f *forkableWriter) Len() (l int) {
+ l += f.Buffer.Len()
+ if f.pre != nil {
+ l += f.pre.Len()
+ }
+ if f.post != nil {
+ l += f.post.Len()
+ }
+ return
+}
+
+func (f *forkableWriter) writeTo(out io.Writer) (n int, err error) {
+ n, err = out.Write(f.Bytes())
+ if err != nil {
+ return
+ }
+
+ var nn int
+
+ if f.pre != nil {
+ nn, err = f.pre.writeTo(out)
+ n += nn
+ if err != nil {
+ return
+ }
+ }
+
+ if f.post != nil {
+ nn, err = f.post.writeTo(out)
+ n += nn
+ }
+ return
+}
+
+func marshalBase128Int(out *forkableWriter, n int64) (err error) {
+ if n == 0 {
+ err = out.WriteByte(0)
+ return
+ }
+
+ l := 0
+ for i := n; i > 0; i >>= 7 {
+ l++
+ }
+
+ for i := l - 1; i >= 0; i-- {
+ o := byte(n >> uint(i*7))
+ o &= 0x7f
+ if i != 0 {
+ o |= 0x80
+ }
+ err = out.WriteByte(o)
+ if err != nil {
+ return
+ }
+ }
+
+ return nil
+}
+
+func marshalInt64(out *forkableWriter, i int64) (err error) {
+ n := int64Length(i)
+
+ for ; n > 0; n-- {
+ err = out.WriteByte(byte(i >> uint((n-1)*8)))
+ if err != nil {
+ return
+ }
+ }
+
+ return nil
+}
+
+func int64Length(i int64) (numBytes int) {
+ numBytes = 1
+
+ for i > 127 {
+ numBytes++
+ i >>= 8
+ }
+
+ for i < -128 {
+ numBytes++
+ i >>= 8
+ }
+
+ return
+}
+
+func marshalBigInt(out *forkableWriter, n *big.Int) (err error) {
+ if n.Sign() < 0 {
+ // A negative number has to be converted to two's-complement
+ // form. So we'll subtract 1 and invert. If the
+ // most-significant-bit isn't set then we'll need to pad the
+ // beginning with 0xff in order to keep the number negative.
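+ // For example (illustrative): n = -129 negates to 129; subtracting 1
+ // gives 128 (0x80), and inverting gives 0x7f, whose top bit is clear,
+ // so a 0xff pad is written first, yielding the two's-complement
+ // encoding 0xff 0x7f.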
+ nMinus1 := new(big.Int).Neg(n)
+ nMinus1.Sub(nMinus1, bigOne)
+ bytes := nMinus1.Bytes()
+ for i := range bytes {
+ bytes[i] ^= 0xff
+ }
+ if len(bytes) == 0 || bytes[0]&0x80 == 0 {
+ err = out.WriteByte(0xff)
+ if err != nil {
+ return
+ }
+ }
+ _, err = out.Write(bytes)
+ } else if n.Sign() == 0 {
+ // Zero is written as a single zero byte rather than no bytes.
+ err = out.WriteByte(0x00)
+ } else {
+ bytes := n.Bytes()
+ if len(bytes) > 0 && bytes[0]&0x80 != 0 {
+ // We'll have to pad this with 0x00 in order to stop it
+ // looking like a negative number.
+ err = out.WriteByte(0)
+ if err != nil {
+ return
+ }
+ }
+ _, err = out.Write(bytes)
+ }
+ return
+}
+
+func marshalLength(out *forkableWriter, i int) (err error) {
+ n := lengthLength(i)
+
+ for ; n > 0; n-- {
+ err = out.WriteByte(byte(i >> uint((n-1)*8)))
+ if err != nil {
+ return
+ }
+ }
+
+ return nil
+}
+
+func lengthLength(i int) (numBytes int) {
+ numBytes = 1
+ for i > 255 {
+ numBytes++
+ i >>= 8
+ }
+ return
+}
+
+func marshalTagAndLength(out *forkableWriter, t tagAndLength) (err error) {
+ b := uint8(t.class) << 6
+ if t.isCompound {
+ b |= 0x20
+ }
+ if t.tag >= 31 {
+ b |= 0x1f
+ err = out.WriteByte(b)
+ if err != nil {
+ return
+ }
+ err = marshalBase128Int(out, int64(t.tag))
+ if err != nil {
+ return
+ }
+ } else {
+ b |= uint8(t.tag)
+ err = out.WriteByte(b)
+ if err != nil {
+ return
+ }
+ }
+
+ if t.length >= 128 {
+ l := lengthLength(t.length)
+ err = out.WriteByte(0x80 | byte(l))
+ if err != nil {
+ return
+ }
+ err = marshalLength(out, t.length)
+ if err != nil {
+ return
+ }
+ } else {
+ err = out.WriteByte(byte(t.length))
+ if err != nil {
+ return
+ }
+ }
+
+ return nil
+}
+
+func marshalBitString(out *forkableWriter, b BitString) (err error) {
+ paddingBits := byte((8 - b.BitLength%8) % 8)
+ err = out.WriteByte(paddingBits)
+ if err != nil {
+ return
+ }
+ _, err = out.Write(b.Bytes)
+ return
+}
+
+func marshalObjectIdentifier(out *forkableWriter, oid []int) (err error) {
+ if len(oid) < 2 || oid[0] > 2 || (oid[0] < 2 && oid[1] >= 40) {
+ return StructuralError{"invalid object identifier"}
+ }
+
+ err = marshalBase128Int(out, int64(oid[0]*40+oid[1]))
+ if err != nil {
+ return
+ }
+ for i := 2; i < len(oid); i++ {
+ err = marshalBase128Int(out, int64(oid[i]))
+ if err != nil {
+ return
+ }
+ }
+
+ return
+}
+
+func marshalPrintableString(out *forkableWriter, s string) (err error) {
+ b := []byte(s)
+ for _, c := range b {
+ if !isPrintable(c) {
+ return StructuralError{"PrintableString contains invalid character"}
+ }
+ }
+
+ _, err = out.Write(b)
+ return
+}
+
+func marshalIA5String(out *forkableWriter, s string) (err error) {
+ b := []byte(s)
+ for _, c := range b {
+ if c > 127 {
+ return StructuralError{"IA5String contains invalid character"}
+ }
+ }
+
+ _, err = out.Write(b)
+ return
+}
+
+func marshalUTF8String(out *forkableWriter, s string) (err error) {
+ _, err = out.Write([]byte(s))
+ return
+}
+
+func marshalTwoDigits(out *forkableWriter, v int) (err error) {
+ err = out.WriteByte(byte('0' + (v/10)%10))
+ if err != nil {
+ return
+ }
+ return out.WriteByte(byte('0' + v%10))
+}
+
+func marshalFourDigits(out *forkableWriter, v int) (err error) {
+ var bytes [4]byte
+ for i := range bytes {
+ bytes[3-i] = '0' + byte(v%10)
+ v /= 10
+ }
+ _, err = out.Write(bytes[:])
+ return
+}
+
+func outsideUTCRange(t time.Time) bool {
+ year := t.Year()
+ return year < 1950 || year >= 2050
+}
+
+func marshalUTCTime(out *forkableWriter, t time.Time) (err error) {
+ year := t.Year()
+
+ switch {
+ case 1950 <= year && year < 2000:
+ err = marshalTwoDigits(out, year-1900)
+ case 2000 <= year && year < 2050:
+ err = marshalTwoDigits(out, year-2000)
+ default:
+ return StructuralError{"cannot represent time as UTCTime"}
+ }
+ if err != nil {
+ return
+ }
+
+ return marshalTimeCommon(out, t)
+}
+
+func marshalGeneralizedTime(out *forkableWriter, t time.Time) (err error) {
+ year := t.Year()
+ if year < 0 || year > 9999 {
+ return StructuralError{"cannot represent time as GeneralizedTime"}
+ }
+ if err = marshalFourDigits(out, year); err != nil {
+ return
+ }
+
+ return marshalTimeCommon(out, t)
+}
+
+func marshalTimeCommon(out *forkableWriter, t time.Time) (err error) {
+ _, month, day := t.Date()
+
+ err = marshalTwoDigits(out, int(month))
+ if err != nil {
+ return
+ }
+
+ err = marshalTwoDigits(out, day)
+ if err != nil {
+ return
+ }
+
+ hour, min, sec := t.Clock()
+
+ err = marshalTwoDigits(out, hour)
+ if err != nil {
+ return
+ }
+
+ err = marshalTwoDigits(out, min)
+ if err != nil {
+ return
+ }
+
+ err = marshalTwoDigits(out, sec)
+ if err != nil {
+ return
+ }
+
+ _, offset := t.Zone()
+
+ switch {
+ case offset/60 == 0:
+ err = out.WriteByte('Z')
+ return
+ case offset > 0:
+ err = out.WriteByte('+')
+ case offset < 0:
+ err = out.WriteByte('-')
+ }
+
+ if err != nil {
+ return
+ }
+
+ offsetMinutes := offset / 60
+ if offsetMinutes < 0 {
+ offsetMinutes = -offsetMinutes
+ }
+
+ err = marshalTwoDigits(out, offsetMinutes/60)
+ if err != nil {
+ return
+ }
+
+ err = marshalTwoDigits(out, offsetMinutes%60)
+ return
+}
+
+func stripTagAndLength(in []byte) []byte {
+ _, offset, err := parseTagAndLength(in, 0)
+ if err != nil {
+ return in
+ }
+ return in[offset:]
+}
+
+func marshalBody(out *forkableWriter, value reflect.Value, params fieldParameters) (err error) {
+ switch value.Type() {
+ case flagType:
+ return nil
+ case timeType:
+ t := value.Interface().(time.Time)
+ if params.timeType == TagGeneralizedTime || outsideUTCRange(t) {
+ return marshalGeneralizedTime(out, t)
+ } else {
+ return marshalUTCTime(out, t)
+ }
+ case bitStringType:
+ return marshalBitString(out, value.Interface().(BitString))
+ case objectIdentifierType:
+ return marshalObjectIdentifier(out, value.Interface().(ObjectIdentifier))
+ case bigIntType:
+ return marshalBigInt(out, value.Interface().(*big.Int))
+ }
+
+ switch v := value; v.Kind() {
+ case reflect.Bool:
+ if v.Bool() {
+ return out.WriteByte(255)
+ } else {
+ return out.WriteByte(0)
+ }
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return marshalInt64(out, v.Int())
+ case reflect.Struct:
+ t := v.Type()
+
+ startingField := 0
+
+ // If the first element of the structure is a non-empty
+ // RawContents, then we don't bother serializing the rest.
+ if t.NumField() > 0 && t.Field(0).Type == rawContentsType {
+ s := v.Field(0)
+ if s.Len() > 0 {
+ bytes := make([]byte, s.Len())
+ for i := 0; i < s.Len(); i++ {
+ bytes[i] = uint8(s.Index(i).Uint())
+ }
+ /* The RawContents will contain the tag and
+ * length fields but we'll also be writing
+ * those ourselves, so we strip them out of
+ * bytes */
+ _, err = out.Write(stripTagAndLength(bytes))
+ return
+ } else {
+ startingField = 1
+ }
+ }
+
+ for i := startingField; i < t.NumField(); i++ {
+ var pre *forkableWriter
+ pre, out = out.fork()
+ err = marshalField(pre, v.Field(i), parseFieldParameters(t.Field(i).Tag.Get("asn1")))
+ if err != nil {
+ return
+ }
+ }
+ return
+ case reflect.Slice:
+ sliceType := v.Type()
+ if sliceType.Elem().Kind() == reflect.Uint8 {
+ bytes := make([]byte, v.Len())
+ for i := 0; i < v.Len(); i++ {
+ bytes[i] = uint8(v.Index(i).Uint())
+ }
+ _, err = out.Write(bytes)
+ return
+ }
+
+ // jtasn1: pass the tags on to the slice members, but clear the explicit
+ // flag and the implicit tag value first so they apply only to this level.
+ params.explicit = false
+ params.tag = nil
+ for i := 0; i < v.Len(); i++ {
+ var pre *forkableWriter
+ pre, out = out.fork()
+ err = marshalField(pre, v.Index(i), params)
+ if err != nil {
+ return
+ }
+ }
+ return
+ case reflect.String:
+ switch params.stringType {
+ case TagIA5String:
+ return marshalIA5String(out, v.String())
+ case TagPrintableString:
+ return marshalPrintableString(out, v.String())
+ default:
+ return marshalUTF8String(out, v.String())
+ }
+ }
+
+ return StructuralError{"unknown Go type"}
+}
+
+func marshalField(out *forkableWriter, v reflect.Value, params fieldParameters) (err error) {
+ if !v.IsValid() {
+ return fmt.Errorf("asn1: cannot marshal nil value")
+ }
+ // If the field is an interface{} then recurse into it.
+ if v.Kind() == reflect.Interface && v.Type().NumMethod() == 0 {
+ return marshalField(out, v.Elem(), params)
+ }
+
+ if v.Kind() == reflect.Slice && v.Len() == 0 && params.omitEmpty {
+ return
+ }
+
+ if params.optional && params.defaultValue != nil && canHaveDefaultValue(v.Kind()) {
+ defaultValue := reflect.New(v.Type()).Elem()
+ defaultValue.SetInt(*params.defaultValue)
+
+ if reflect.DeepEqual(v.Interface(), defaultValue.Interface()) {
+ return
+ }
+ }
+
+ // If no default value is given then the zero value for the type is
+ // assumed to be the default value. This isn't obviously the correct
+ // behaviour, but it's what Go has traditionally done.
+ if params.optional && params.defaultValue == nil {
+ if reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) {
+ return
+ }
+ }
+
+ if v.Type() == rawValueType {
+ rv := v.Interface().(RawValue)
+ if len(rv.FullBytes) != 0 {
+ _, err = out.Write(rv.FullBytes)
+ } else {
+ err = marshalTagAndLength(out, tagAndLength{rv.Class, rv.Tag, len(rv.Bytes), rv.IsCompound})
+ if err != nil {
+ return
+ }
+ _, err = out.Write(rv.Bytes)
+ }
+ return
+ }
+
+ tag, isCompound, ok := getUniversalType(v.Type())
+ if !ok {
+ err = StructuralError{fmt.Sprintf("unknown Go type: %v", v.Type())}
+ return
+ }
+ class := ClassUniversal
+
+ if params.timeType != 0 && tag != TagUTCTime {
+ return StructuralError{"explicit time type given to non-time member"}
+ }
+
+ // jtasn1 updated to allow slices of strings
+ if params.stringType != 0 && !(tag == TagPrintableString || (v.Kind() == reflect.Slice && tag == TagSequence && v.Type().Elem().Kind() == reflect.String)) {
+ return StructuralError{"explicit string type given to non-string member"}
+ }
+
+ switch tag {
+ case TagPrintableString:
+ if params.stringType == 0 {
+ // This is a string without an explicit string type. We'll use
+ // a PrintableString if the character set in the string is
+ // sufficiently limited, otherwise we'll use a UTF8String.
+ for _, r := range v.String() {
+ if r >= utf8.RuneSelf || !isPrintable(byte(r)) {
+ if !utf8.ValidString(v.String()) {
+ return errors.New("asn1: string not valid UTF-8")
+ }
+ tag = TagUTF8String
+ break
+ }
+ }
+ } else {
+ tag = params.stringType
+ }
+ case TagUTCTime:
+ if params.timeType == TagGeneralizedTime || outsideUTCRange(v.Interface().(time.Time)) {
+ tag = TagGeneralizedTime
+ }
+ }
+
+ if params.set {
+ if tag != TagSequence {
+ return StructuralError{"non sequence tagged as set"}
+ }
+ tag = TagSet
+ }
+
+ tags, body := out.fork()
+
+ err = marshalBody(body, v, params)
+ if err != nil {
+ return
+ }
+
+ bodyLen := body.Len()
+
+ var explicitTag *forkableWriter
+ if params.explicit {
+ explicitTag, tags = tags.fork()
+ }
+
+ if !params.explicit && params.tag != nil {
+ // implicit tag.
+ tag = *params.tag
+ class = ClassContextSpecific
+ }
+
+ err = marshalTagAndLength(tags, tagAndLength{class, tag, bodyLen, isCompound})
+ if err != nil {
+ return
+ }
+
+ if params.explicit {
+ err = marshalTagAndLength(explicitTag, tagAndLength{
+ class: ClassContextSpecific,
+ tag: *params.tag,
+ length: bodyLen + tags.Len(),
+ isCompound: true,
+ })
+ }
+
+ return err
+}
+
+// Marshal returns the ASN.1 encoding of val.
+//
+// In addition to the struct tags recognised by Unmarshal, the following can be
+// used:
+//
+// ia5: causes strings to be marshaled as ASN.1 IA5 strings.
+// omitempty: causes empty slices to be skipped.
+// printable: causes strings to be marshaled as ASN.1 PrintableString strings.
+// utf8: causes strings to be marshaled as ASN.1 UTF8 strings.
+func Marshal(val interface{}) ([]byte, error) {
+ var out bytes.Buffer
+ v := reflect.ValueOf(val)
+ f := newForkableWriter()
+ err := marshalField(f, v, fieldParameters{})
+ if err != nil {
+ return nil, err
+ }
+ _, err = f.writeTo(&out)
+ return out.Bytes(), err
+}
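+
+// For example (an illustrative, hypothetical type, not part of this package):
+//
+//	type Item struct {
+//		Name string `asn1:"printable"`
+//	}
+//	der, err := Marshal(Item{Name: "hi"})
+//
+// encodes Item as a SEQUENCE containing a PrintableString.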
diff --git a/vendor/github.com/jcmturner/gofork/x/crypto/pbkdf2/pbkdf2.go b/vendor/github.com/jcmturner/gofork/x/crypto/pbkdf2/pbkdf2.go
new file mode 100644
index 0000000..75d4187
--- /dev/null
+++ b/vendor/github.com/jcmturner/gofork/x/crypto/pbkdf2/pbkdf2.go
@@ -0,0 +1,98 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC
+2898 / PKCS #5 v2.0.
+
+A key derivation function is useful when encrypting data based on a password
+or any other not-fully-random data. It uses a pseudorandom function to derive
+a secure encryption key based on the password.
+
+While v2.0 of the standard defines only one pseudorandom function to use,
+HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved
+Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To
+choose, you can pass the `New` functions from the different SHA packages to
+pbkdf2.Key.
+*/
+package pbkdf2
+
+import (
+ "crypto/hmac"
+ "hash"
+)
+
+// Key derives a key from the password, salt and iteration count, returning a
+// []byte of length keylen that can be used as cryptographic key. The key is
+// derived based on the method described as PBKDF2 with the HMAC variant using
+// the supplied hash function.
+//
+// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you
+// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by
+// doing:
+//
+// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New)
+//
+// Remember to get a good random salt. At least 8 bytes is recommended by the
+// RFC.
+//
+// Using a higher iteration count will increase the cost of an exhaustive
+// search but will also make derivation proportionally slower.
+func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte {
+ return Key64(password, salt, int64(iter), int64(keyLen), h)
+}
+
+// Key64 derives a key from the password, salt and iteration count, returning a
+// []byte of length keylen that can be used as cryptographic key. Key64 uses
+// int64 for the iteration count and key length to allow larger values.
+// The key is derived based on the method described as PBKDF2 with the HMAC
+// variant using the supplied hash function.
+//
+// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you
+// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by
+// doing:
+//
+// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New)
+//
+// Remember to get a good random salt. At least 8 bytes is recommended by the
+// RFC.
+//
+// Using a higher iteration count will increase the cost of an exhaustive
+// search but will also make derivation proportionally slower.
+func Key64(password, salt []byte, iter, keyLen int64, h func() hash.Hash) []byte {
+ prf := hmac.New(h, password)
+ hashLen := int64(prf.Size())
+ numBlocks := (keyLen + hashLen - 1) / hashLen
+
+ var buf [4]byte
+ dk := make([]byte, 0, numBlocks*hashLen)
+ U := make([]byte, hashLen)
+ for block := int64(1); block <= numBlocks; block++ {
+ // N.B.: || means concatenation, ^ means XOR
+ // for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter
+ // U_1 = PRF(password, salt || uint(i))
+ prf.Reset()
+ prf.Write(salt)
+ buf[0] = byte(block >> 24)
+ buf[1] = byte(block >> 16)
+ buf[2] = byte(block >> 8)
+ buf[3] = byte(block)
+ prf.Write(buf[:4])
+ dk = prf.Sum(dk)
+ T := dk[int64(len(dk))-hashLen:]
+ copy(U, T)
+
+ // U_n = PRF(password, U_(n-1))
+ for n := int64(2); n <= iter; n++ {
+ prf.Reset()
+ prf.Write(U)
+ U = U[:0]
+ U = prf.Sum(U)
+ for x := range U {
+ T[x] ^= U[x]
+ }
+ }
+ }
+ return dk[:keyLen]
+}
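+
+// For example (illustrative): deriving a 32-byte key with HMAC-SHA-256 only
+// requires passing a different hash constructor:
+//
+//	dk := pbkdf2.Key64([]byte("some password"), salt, 4096, 32, sha256.New)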
diff --git a/vendor/github.com/jinzhu/copier/License b/vendor/github.com/jinzhu/copier/License
new file mode 100644
index 0000000..e2dc538
--- /dev/null
+++ b/vendor/github.com/jinzhu/copier/License
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Jinzhu
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/jinzhu/copier/README.md b/vendor/github.com/jinzhu/copier/README.md
new file mode 100644
index 0000000..ec04b4b
--- /dev/null
+++ b/vendor/github.com/jinzhu/copier/README.md
@@ -0,0 +1,132 @@
+# Copier
+
+ I am a copier, I copy everything from one to another
+
+[![test status](https://github.com/jinzhu/copier/workflows/tests/badge.svg?branch=master "test status")](https://github.com/jinzhu/copier/actions)
+
+## Features
+
+* Copy from field to field with same name
+* Copy from method to field with same name
+* Copy from field to method with same name
+* Copy from slice to slice
+* Copy from struct to slice
+* Copy from map to map
+* Enforce copying a field with a tag
+* Ignore a field with a tag
+* Deep Copy
+
+## Usage
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/jinzhu/copier"
+)
+
+type User struct {
+ Name string
+ Role string
+ Age int32
+ EmployeCode int64 `copier:"EmployeNum"` // specify field name
+
+ // Explicitly ignored in the destination struct.
+ Salary int
+}
+
+func (user *User) DoubleAge() int32 {
+ return 2 * user.Age
+}
+
+// Tags in the destination struct provide instructions to copier.Copy to ignore
+// or enforce copying and to panic or return an error if a field was not copied.
+type Employee struct {
+ // Tell copier.Copy to panic if this field is not copied.
+ Name string `copier:"must"`
+
+ // Tell copier.Copy to return an error if this field is not copied.
+ Age int32 `copier:"must,nopanic"`
+
+ // Tell copier.Copy to explicitly ignore copying this field.
+ Salary int `copier:"-"`
+
+ DoubleAge int32
+ EmployeId int64 `copier:"EmployeNum"` // specify field name
+ SuperRole string
+}
+
+func (employee *Employee) Role(role string) {
+ employee.SuperRole = "Super " + role
+}
+
+func main() {
+ var (
+ user = User{Name: "Jinzhu", Age: 18, Role: "Admin", Salary: 200000}
+ users = []User{{Name: "Jinzhu", Age: 18, Role: "Admin", Salary: 100000}, {Name: "jinzhu 2", Age: 30, Role: "Dev", Salary: 60000}}
+ employee = Employee{Salary: 150000}
+ employees = []Employee{}
+ )
+
+ copier.Copy(&employee, &user)
+
+ fmt.Printf("%#v \n", employee)
+ // Employee{
+ // Name: "Jinzhu", // Copy from field
+ // Age: 18, // Copy from field
+ // Salary:150000, // Copying explicitly ignored
+ // DoubleAge: 36, // Copy from method
+// EmployeId: 0, // Ignored
+ // SuperRole: "Super Admin", // Copy to method
+ // }
+
+ // Copy struct to slice
+ copier.Copy(&employees, &user)
+
+ fmt.Printf("%#v \n", employees)
+ // []Employee{
+ // {Name: "Jinzhu", Age: 18, Salary:0, DoubleAge: 36, EmployeId: 0, SuperRole: "Super Admin"}
+ // }
+
+ // Copy slice to slice
+ employees = []Employee{}
+ copier.Copy(&employees, &users)
+
+ fmt.Printf("%#v \n", employees)
+ // []Employee{
+ // {Name: "Jinzhu", Age: 18, Salary:0, DoubleAge: 36, EmployeId: 0, SuperRole: "Super Admin"},
+ // {Name: "jinzhu 2", Age: 30, Salary:0, DoubleAge: 60, EmployeId: 0, SuperRole: "Super Dev"},
+ // }
+
+ // Copy map to map
+ map1 := map[int]int{3: 6, 4: 8}
+ map2 := map[int32]int8{}
+ copier.Copy(&map2, map1)
+
+ fmt.Printf("%#v \n", map2)
+ // map[int32]int8{3:6, 4:8}
+}
+```
+
+### Copy with Option
+
+```go
+copier.CopyWithOption(&to, &from, copier.Option{IgnoreEmpty: true, DeepCopy: true})
+```
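+
+A minimal sketch of registering a custom converter (this assumes the
+`TypeConverter` option defined in copier.go; the string-to-bool rule below is
+purely illustrative):
+
+```go
+copier.CopyWithOption(&to, &from, copier.Option{
+ Converters: []copier.TypeConverter{
+  {
+   SrcType: copier.String, // zero string, used only as a type marker
+   DstType: copier.Bool,   // zero bool, used only as a type marker
+   Fn: func(src interface{}) (interface{}, error) {
+    // hypothetical conversion rule: "true" becomes true, anything else false
+    s, _ := src.(string)
+    return s == "true", nil
+   },
+  },
+ },
+})
+```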
+
+## Contributing
+
+You can help make the project better; check out [http://gorm.io/contribute.html](http://gorm.io/contribute.html) for things you can do.
+
+## Author
+
+**jinzhu**
+
+## License
+
+Released under the [MIT License](https://github.com/jinzhu/copier/blob/master/License).
diff --git a/vendor/github.com/jinzhu/copier/copier.go b/vendor/github.com/jinzhu/copier/copier.go
new file mode 100644
index 0000000..6dc9600
--- /dev/null
+++ b/vendor/github.com/jinzhu/copier/copier.go
@@ -0,0 +1,697 @@
+package copier
+
+import (
+ "database/sql"
+ "database/sql/driver"
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "unicode"
+)
+
+// These flags define options for tag handling
+const (
+ // Denotes that a destination field must be copied to. If copying fails then a panic will ensue.
+ tagMust uint8 = 1 << iota
+
+ // Denotes that the program should not panic when the must flag is on and
+ // value is not copied. The program will return an error instead.
+ tagNoPanic
+
+ // Denotes that a destination field should be ignored and not copied to.
+ tagIgnore
+
+ // Denotes that the value has been copied
+ hasCopied
+
+ // Some default converter types for a nicer syntax
+ String string = ""
+ Bool bool = false
+ Int int = 0
+ Float32 float32 = 0
+ Float64 float64 = 0
+)
+
+// Option sets copy options
+type Option struct {
+ // Setting this value to true will ignore copying zero values of all the fields, including bools,
+ // as well as a struct having all its fields set to their zero values (see IsZero() in reflect/value.go)
+ IgnoreEmpty bool
+ DeepCopy bool
+ Converters []TypeConverter
+}
+
+type TypeConverter struct {
+ SrcType interface{}
+ DstType interface{}
+ Fn func(src interface{}) (interface{}, error)
+}
+
+type converterPair struct {
+ SrcType reflect.Type
+ DstType reflect.Type
+}
+
+// Tag Flags
+type flags struct {
+ BitFlags map[string]uint8
+ SrcNames tagNameMapping
+ DestNames tagNameMapping
+}
+
+// Field Tag name mapping
+type tagNameMapping struct {
+ FieldNameToTag map[string]string
+ TagToFieldName map[string]string
+}
+
+// Copy copies things from fromValue to toValue
+func Copy(toValue interface{}, fromValue interface{}) (err error) {
+ return copier(toValue, fromValue, Option{})
+}
+
+// CopyWithOption copies from fromValue to toValue with the given options
+func CopyWithOption(toValue interface{}, fromValue interface{}, opt Option) (err error) {
+ return copier(toValue, fromValue, opt)
+}
+
+func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) {
+ var (
+ isSlice bool
+ amount = 1
+ from = indirect(reflect.ValueOf(fromValue))
+ to = indirect(reflect.ValueOf(toValue))
+ converters map[converterPair]TypeConverter
+ )
+
+ // save converters into a map for faster lookup
+ for i := range opt.Converters {
+ if converters == nil {
+ converters = make(map[converterPair]TypeConverter)
+ }
+
+ pair := converterPair{
+ SrcType: reflect.TypeOf(opt.Converters[i].SrcType),
+ DstType: reflect.TypeOf(opt.Converters[i].DstType),
+ }
+
+ converters[pair] = opt.Converters[i]
+ }
+
+ if !to.CanAddr() {
+ return ErrInvalidCopyDestination
+ }
+
+ // Return if the from value is invalid
+ if !from.IsValid() {
+ return ErrInvalidCopyFrom
+ }
+
+ fromType, isPtrFrom := indirectType(from.Type())
+ toType, _ := indirectType(to.Type())
+
+ if fromType.Kind() == reflect.Interface {
+ fromType = reflect.TypeOf(from.Interface())
+ }
+
+ if toType.Kind() == reflect.Interface {
+ toType, _ = indirectType(reflect.TypeOf(to.Interface()))
+ oldTo := to
+ to = reflect.New(reflect.TypeOf(to.Interface())).Elem()
+ defer func() {
+ oldTo.Set(to)
+ }()
+ }
+
+ // For normal types, just set the value directly if assignment or conversion is possible
+ if from.Kind() != reflect.Slice && from.Kind() != reflect.Struct && from.Kind() != reflect.Map && (from.Type().AssignableTo(to.Type()) || from.Type().ConvertibleTo(to.Type())) {
+ if !isPtrFrom || !opt.DeepCopy {
+ to.Set(from.Convert(to.Type()))
+ } else {
+ fromCopy := reflect.New(from.Type())
+ fromCopy.Set(from.Elem())
+ to.Set(fromCopy.Convert(to.Type()))
+ }
+ return
+ }
+
+ if from.Kind() != reflect.Slice && fromType.Kind() == reflect.Map && toType.Kind() == reflect.Map {
+ if !fromType.Key().ConvertibleTo(toType.Key()) {
+ return ErrMapKeyNotMatch
+ }
+
+ if to.IsNil() {
+ to.Set(reflect.MakeMapWithSize(toType, from.Len()))
+ }
+
+ for _, k := range from.MapKeys() {
+ toKey := indirect(reflect.New(toType.Key()))
+ if !set(toKey, k, opt.DeepCopy, converters) {
+ return fmt.Errorf("%w map, old key: %v, new key: %v", ErrNotSupported, k.Type(), toType.Key())
+ }
+
+ elemType := toType.Elem()
+ if elemType.Kind() != reflect.Slice {
+ elemType, _ = indirectType(elemType)
+ }
+ toValue := indirect(reflect.New(elemType))
+ if !set(toValue, from.MapIndex(k), opt.DeepCopy, converters) {
+ if err = copier(toValue.Addr().Interface(), from.MapIndex(k).Interface(), opt); err != nil {
+ return err
+ }
+ }
+
+ for {
+ if elemType == toType.Elem() {
+ to.SetMapIndex(toKey, toValue)
+ break
+ }
+ elemType = reflect.PtrTo(elemType)
+ toValue = toValue.Addr()
+ }
+ }
+ return
+ }
+
+ if from.Kind() == reflect.Slice && to.Kind() == reflect.Slice && fromType.ConvertibleTo(toType) {
+ if to.IsNil() {
+ slice := reflect.MakeSlice(reflect.SliceOf(to.Type().Elem()), from.Len(), from.Cap())
+ to.Set(slice)
+ }
+
+ for i := 0; i < from.Len(); i++ {
+ if to.Len() < i+1 {
+ to.Set(reflect.Append(to, reflect.New(to.Type().Elem()).Elem()))
+ }
+
+ if !set(to.Index(i), from.Index(i), opt.DeepCopy, converters) {
+ // ignore error while copying a slice element
+ err = copier(to.Index(i).Addr().Interface(), from.Index(i).Interface(), opt)
+ if err != nil {
+ continue
+ }
+ }
+ }
+ return
+ }
+
+ if fromType.Kind() != reflect.Struct || toType.Kind() != reflect.Struct {
+ // skip not supported type
+ return
+ }
+
+ if from.Kind() == reflect.Slice || to.Kind() == reflect.Slice {
+ isSlice = true
+ if from.Kind() == reflect.Slice {
+ amount = from.Len()
+ }
+ }
+
+ for i := 0; i < amount; i++ {
+ var dest, source reflect.Value
+
+ if isSlice {
+ // source
+ if from.Kind() == reflect.Slice {
+ source = indirect(from.Index(i))
+ } else {
+ source = indirect(from)
+ }
+ // dest
+ dest = indirect(reflect.New(toType).Elem())
+ } else {
+ source = indirect(from)
+ dest = indirect(to)
+ }
+
+ destKind := dest.Kind()
+ initDest := false
+ if destKind == reflect.Interface {
+ initDest = true
+ dest = indirect(reflect.New(toType))
+ }
+
+ // Get tag options
+ flgs, err := getFlags(dest, source, toType, fromType)
+ if err != nil {
+ return err
+ }
+
+ // check source
+ if source.IsValid() {
+ copyUnexportedStructFields(dest, source)
+
+ // Copy from source field to dest field or method
+ fromTypeFields := deepFields(fromType)
+ for _, field := range fromTypeFields {
+ name := field.Name
+
+ // Get bit flags for field
+ fieldFlags := flgs.BitFlags[name]
+
+ // Check if we should ignore copying
+ if (fieldFlags & tagIgnore) != 0 {
+ continue
+ }
+
+ srcFieldName, destFieldName := getFieldName(name, flgs)
+ if fromField := source.FieldByName(srcFieldName); fromField.IsValid() && !shouldIgnore(fromField, opt.IgnoreEmpty) {
+ // process for nested anonymous field
+ destFieldNotSet := false
+ if f, ok := dest.Type().FieldByName(destFieldName); ok {
+ for idx := range f.Index {
+ destField := dest.FieldByIndex(f.Index[:idx+1])
+
+ if destField.Kind() != reflect.Ptr {
+ continue
+ }
+
+ if !destField.IsNil() {
+ continue
+ }
+ if !destField.CanSet() {
+ destFieldNotSet = true
+ break
+ }
+
+ // destField is a nil pointer that can be set
+ newValue := reflect.New(destField.Type().Elem())
+ destField.Set(newValue)
+ }
+ }
+
+ if destFieldNotSet {
+ break
+ }
+
+ toField := dest.FieldByName(destFieldName)
+ if toField.IsValid() {
+ if toField.CanSet() {
+ if !set(toField, fromField, opt.DeepCopy, converters) {
+ if err := copier(toField.Addr().Interface(), fromField.Interface(), opt); err != nil {
+ return err
+ }
+ }
+ if fieldFlags != 0 {
+ // Note that a copy was made
+ flgs.BitFlags[name] = fieldFlags | hasCopied
+ }
+ }
+ } else {
+ // try to set to method
+ var toMethod reflect.Value
+ if dest.CanAddr() {
+ toMethod = dest.Addr().MethodByName(destFieldName)
+ } else {
+ toMethod = dest.MethodByName(destFieldName)
+ }
+
+ if toMethod.IsValid() && toMethod.Type().NumIn() == 1 && fromField.Type().AssignableTo(toMethod.Type().In(0)) {
+ toMethod.Call([]reflect.Value{fromField})
+ }
+ }
+ }
+ }
+
+ // Copy from method to dest field
+ for _, field := range deepFields(toType) {
+ name := field.Name
+ srcFieldName, destFieldName := getFieldName(name, flgs)
+
+ var fromMethod reflect.Value
+ if source.CanAddr() {
+ fromMethod = source.Addr().MethodByName(srcFieldName)
+ } else {
+ fromMethod = source.MethodByName(srcFieldName)
+ }
+
+ if fromMethod.IsValid() && fromMethod.Type().NumIn() == 0 && fromMethod.Type().NumOut() == 1 && !shouldIgnore(fromMethod, opt.IgnoreEmpty) {
+ if toField := dest.FieldByName(destFieldName); toField.IsValid() && toField.CanSet() {
+ values := fromMethod.Call([]reflect.Value{})
+ if len(values) >= 1 {
+ set(toField, values[0], opt.DeepCopy, converters)
+ }
+ }
+ }
+ }
+ }
+
+ if isSlice && to.Kind() == reflect.Slice {
+ if dest.Addr().Type().AssignableTo(to.Type().Elem()) {
+ if to.Len() < i+1 {
+ to.Set(reflect.Append(to, dest.Addr()))
+ } else {
+ if !set(to.Index(i), dest.Addr(), opt.DeepCopy, converters) {
+ // ignore error while copying a slice element
+ err = copier(to.Index(i).Addr().Interface(), dest.Addr().Interface(), opt)
+ if err != nil {
+ continue
+ }
+ }
+ }
+ } else if dest.Type().AssignableTo(to.Type().Elem()) {
+ if to.Len() < i+1 {
+ to.Set(reflect.Append(to, dest))
+ } else {
+ if !set(to.Index(i), dest, opt.DeepCopy, converters) {
+ // ignore error while copying a slice element
+ err = copier(to.Index(i).Addr().Interface(), dest.Interface(), opt)
+ if err != nil {
+ continue
+ }
+ }
+ }
+ }
+ } else if initDest {
+ to.Set(dest)
+ }
+
+ err = checkBitFlags(flgs.BitFlags)
+ }
+
+ return
+}
+
+func copyUnexportedStructFields(to, from reflect.Value) {
+ if from.Kind() != reflect.Struct || to.Kind() != reflect.Struct || !from.Type().AssignableTo(to.Type()) {
+ return
+ }
+
+ // create a shallow copy of 'to' to get all fields
+ tmp := indirect(reflect.New(to.Type()))
+ tmp.Set(from)
+
+ // revert exported fields
+ for i := 0; i < to.NumField(); i++ {
+ if tmp.Field(i).CanSet() {
+ tmp.Field(i).Set(to.Field(i))
+ }
+ }
+ to.Set(tmp)
+}
+
+func shouldIgnore(v reflect.Value, ignoreEmpty bool) bool {
+ if !ignoreEmpty {
+ return false
+ }
+
+ return v.IsZero()
+}
+
+func deepFields(reflectType reflect.Type) []reflect.StructField {
+ if reflectType, _ = indirectType(reflectType); reflectType.Kind() == reflect.Struct {
+ fields := make([]reflect.StructField, 0, reflectType.NumField())
+
+ for i := 0; i < reflectType.NumField(); i++ {
+ v := reflectType.Field(i)
+ // PkgPath is the package path that qualifies a lower case (unexported)
+ // field name. It is empty for upper case (exported) field names.
+ // See https://golang.org/ref/spec#Uniqueness_of_identifiers
+ if v.PkgPath == "" {
+ fields = append(fields, v)
+ if v.Anonymous {
+ // also consider fields of anonymous fields as fields of the root
+ fields = append(fields, deepFields(v.Type)...)
+ }
+ }
+ }
+
+ return fields
+ }
+
+ return nil
+}
+
+func indirect(reflectValue reflect.Value) reflect.Value {
+ for reflectValue.Kind() == reflect.Ptr {
+ reflectValue = reflectValue.Elem()
+ }
+ return reflectValue
+}
+
+func indirectType(reflectType reflect.Type) (_ reflect.Type, isPtr bool) {
+ for reflectType.Kind() == reflect.Ptr || reflectType.Kind() == reflect.Slice {
+ reflectType = reflectType.Elem()
+ isPtr = true
+ }
+ return reflectType, isPtr
+}
+
+func set(to, from reflect.Value, deepCopy bool, converters map[converterPair]TypeConverter) bool {
+ if from.IsValid() {
+ if ok, err := lookupAndCopyWithConverter(to, from, converters); err != nil {
+ return false
+ } else if ok {
+ return true
+ }
+
+ if to.Kind() == reflect.Ptr {
+ // set `to` to nil if from is nil
+ if from.Kind() == reflect.Ptr && from.IsNil() {
+ to.Set(reflect.Zero(to.Type()))
+ return true
+ } else if to.IsNil() {
+ // `from` -> `to`
+ // sql.NullString -> *string
+ if fromValuer, ok := driverValuer(from); ok {
+ v, err := fromValuer.Value()
+ if err != nil {
+ return false
+ }
+ // if `from` is not valid do nothing with `to`
+ if v == nil {
+ return true
+ }
+ }
+ // allocate new `to` variable with default value (eg. *string -> new(string))
+ to.Set(reflect.New(to.Type().Elem()))
+ }
+ // depointer `to`
+ to = to.Elem()
+ }
+
+ if deepCopy {
+ toKind := to.Kind()
+ if toKind == reflect.Interface && to.IsNil() {
+ if reflect.TypeOf(from.Interface()) != nil {
+ to.Set(reflect.New(reflect.TypeOf(from.Interface())).Elem())
+ toKind = reflect.TypeOf(to.Interface()).Kind()
+ }
+ }
+ if from.Kind() == reflect.Ptr && from.IsNil() {
+ return true
+ }
+ if toKind == reflect.Struct || toKind == reflect.Map || toKind == reflect.Slice {
+ return false
+ }
+ }
+
+ if from.Type().ConvertibleTo(to.Type()) {
+ to.Set(from.Convert(to.Type()))
+ } else if toScanner, ok := to.Addr().Interface().(sql.Scanner); ok {
+ // `from` -> `to`
+ // *string -> sql.NullString
+ if from.Kind() == reflect.Ptr {
+ // if `from` is nil do nothing with `to`
+ if from.IsNil() {
+ return true
+ }
+ // depointer `from`
+ from = indirect(from)
+ }
+ // `from` -> `to`
+ // string -> sql.NullString
+ // set `to` by invoking method Scan(`from`)
+ err := toScanner.Scan(from.Interface())
+ if err != nil {
+ return false
+ }
+ } else if fromValuer, ok := driverValuer(from); ok {
+ // `from` -> `to`
+ // sql.NullString -> string
+ v, err := fromValuer.Value()
+ if err != nil {
+ return false
+ }
+ // if `from` is not valid do nothing with `to`
+ if v == nil {
+ return true
+ }
+ rv := reflect.ValueOf(v)
+ if rv.Type().AssignableTo(to.Type()) {
+ to.Set(rv)
+ }
+ } else if from.Kind() == reflect.Ptr {
+ return set(to, from.Elem(), deepCopy, converters)
+ } else {
+ return false
+ }
+ }
+
+ return true
+}
+
+// lookupAndCopyWithConverter looks up the type pair; on success, the
+// TypeConverter's Fn func is called to copy src to the dst field.
+func lookupAndCopyWithConverter(to, from reflect.Value, converters map[converterPair]TypeConverter) (copied bool, err error) {
+ pair := converterPair{
+ SrcType: from.Type(),
+ DstType: to.Type(),
+ }
+
+ if cnv, ok := converters[pair]; ok {
+ result, err := cnv.Fn(from.Interface())
+
+ if err != nil {
+ return false, err
+ }
+
+ if result != nil {
+ to.Set(reflect.ValueOf(result))
+ } else {
+ // in case we've got a nil value to copy
+ to.Set(reflect.Zero(to.Type()))
+ }
+
+ return true, nil
+ }
+
+ return false, nil
+}
+
+// parseTags parses struct tags and returns uint8 bit flags.
+func parseTags(tag string) (flg uint8, name string, err error) {
+ for _, t := range strings.Split(tag, ",") {
+ switch t {
+ case "-":
+ flg = tagIgnore
+ return
+ case "must":
+ flg = flg | tagMust
+ case "nopanic":
+ flg = flg | tagNoPanic
+ default:
+ if unicode.IsUpper([]rune(t)[0]) {
+ name = strings.TrimSpace(t)
+ } else {
+ err = errors.New("copier field name tag must start with an upper case letter")
+ }
+ }
+ }
+ return
+}
+
+// getFlags parses struct tags for bit flags and field names.
+func getFlags(dest, src reflect.Value, toType, fromType reflect.Type) (flags, error) {
+ flgs := flags{
+ BitFlags: map[string]uint8{},
+ SrcNames: tagNameMapping{
+ FieldNameToTag: map[string]string{},
+ TagToFieldName: map[string]string{},
+ },
+ DestNames: tagNameMapping{
+ FieldNameToTag: map[string]string{},
+ TagToFieldName: map[string]string{},
+ },
+ }
+ var toTypeFields, fromTypeFields []reflect.StructField
+ if dest.IsValid() {
+ toTypeFields = deepFields(toType)
+ }
+ if src.IsValid() {
+ fromTypeFields = deepFields(fromType)
+ }
+
+ // Get a list of dest tags
+ for _, field := range toTypeFields {
+ tags := field.Tag.Get("copier")
+ if tags != "" {
+ var name string
+ var err error
+ if flgs.BitFlags[field.Name], name, err = parseTags(tags); err != nil {
+ return flags{}, err
+ } else if name != "" {
+ flgs.DestNames.FieldNameToTag[field.Name] = name
+ flgs.DestNames.TagToFieldName[name] = field.Name
+ }
+ }
+ }
+
+ // Get a list of source tags
+ for _, field := range fromTypeFields {
+ tags := field.Tag.Get("copier")
+ if tags != "" {
+ var name string
+ var err error
+ if _, name, err = parseTags(tags); err != nil {
+ return flags{}, err
+ } else if name != "" {
+ flgs.SrcNames.FieldNameToTag[field.Name] = name
+ flgs.SrcNames.TagToFieldName[name] = field.Name
+ }
+ }
+ }
+ return flgs, nil
+}
+
+// checkBitFlags checks flags for error or panic conditions.
+func checkBitFlags(flagsList map[string]uint8) (err error) {
+ // Check flag conditions were met
+ for name, flgs := range flagsList {
+ if flgs&hasCopied == 0 {
+ switch {
+ case flgs&tagMust != 0 && flgs&tagNoPanic != 0:
+ err = fmt.Errorf("field %s has must tag but was not copied", name)
+ return
+ case flgs&(tagMust) != 0:
+ panic(fmt.Sprintf("Field %s has must tag but was not copied", name))
+ }
+ }
+ }
+ return
+}
+
+func getFieldName(fieldName string, flgs flags) (srcFieldName string, destFieldName string) {
+ // get dest field name
+ if srcTagName, ok := flgs.SrcNames.FieldNameToTag[fieldName]; ok {
+ destFieldName = srcTagName
+ if destTagName, ok := flgs.DestNames.TagToFieldName[srcTagName]; ok {
+ destFieldName = destTagName
+ }
+ } else {
+ if destTagName, ok := flgs.DestNames.TagToFieldName[fieldName]; ok {
+ destFieldName = destTagName
+ }
+ }
+ if destFieldName == "" {
+ destFieldName = fieldName
+ }
+
+ // get source field name
+ if destTagName, ok := flgs.DestNames.FieldNameToTag[fieldName]; ok {
+ srcFieldName = destTagName
+ if srcField, ok := flgs.SrcNames.TagToFieldName[destTagName]; ok {
+ srcFieldName = srcField
+ }
+ } else {
+ if srcField, ok := flgs.SrcNames.TagToFieldName[fieldName]; ok {
+ srcFieldName = srcField
+ }
+ }
+
+ if srcFieldName == "" {
+ srcFieldName = fieldName
+ }
+ return
+}
+
+func driverValuer(v reflect.Value) (i driver.Valuer, ok bool) {
+ if !v.CanAddr() {
+ i, ok = v.Interface().(driver.Valuer)
+ return
+ }
+
+ i, ok = v.Addr().Interface().(driver.Valuer)
+ return
+}
diff --git a/vendor/github.com/jinzhu/copier/errors.go b/vendor/github.com/jinzhu/copier/errors.go
new file mode 100644
index 0000000..cf7c5e7
--- /dev/null
+++ b/vendor/github.com/jinzhu/copier/errors.go
@@ -0,0 +1,10 @@
+package copier
+
+import "errors"
+
+var (
+ ErrInvalidCopyDestination = errors.New("copy destination is invalid")
+ ErrInvalidCopyFrom = errors.New("copy from is invalid")
+ ErrMapKeyNotMatch = errors.New("map's key type doesn't match")
+ ErrNotSupported = errors.New("not supported")
+)
diff --git a/vendor/github.com/jinzhu/copier/go.mod b/vendor/github.com/jinzhu/copier/go.mod
new file mode 100644
index 0000000..309801e
--- /dev/null
+++ b/vendor/github.com/jinzhu/copier/go.mod
@@ -0,0 +1,3 @@
+module github.com/jinzhu/copier
+
+go 1.13
diff --git a/vendor/github.com/jinzhu/inflection/LICENSE b/vendor/github.com/jinzhu/inflection/LICENSE
new file mode 100644
index 0000000..a1ca9a0
--- /dev/null
+++ b/vendor/github.com/jinzhu/inflection/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 - Jinzhu
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/jinzhu/inflection/README.md b/vendor/github.com/jinzhu/inflection/README.md
new file mode 100644
index 0000000..a3de336
--- /dev/null
+++ b/vendor/github.com/jinzhu/inflection/README.md
@@ -0,0 +1,55 @@
+# Inflection
+
+Inflection pluralizes and singularizes English nouns
+
+[![wercker status](https://app.wercker.com/status/f8c7432b097d1f4ce636879670be0930/s/master "wercker status")](https://app.wercker.com/project/byKey/f8c7432b097d1f4ce636879670be0930)
+
+## Basic Usage
+
+```go
+inflection.Plural("person") => "people"
+inflection.Plural("Person") => "People"
+inflection.Plural("PERSON") => "PEOPLE"
+inflection.Plural("bus") => "buses"
+inflection.Plural("BUS") => "BUSES"
+inflection.Plural("Bus") => "Buses"
+
+inflection.Singular("people") => "person"
+inflection.Singular("People") => "Person"
+inflection.Singular("PEOPLE") => "PERSON"
+inflection.Singular("buses") => "bus"
+inflection.Singular("BUSES") => "BUS"
+inflection.Singular("Buses") => "Bus"
+
+inflection.Plural("FancyPerson") => "FancyPeople"
+inflection.Singular("FancyPeople") => "FancyPerson"
+```
+
+## Register Rules
+
+Standard rules are from Rails's ActiveSupport (https://github.com/rails/rails/blob/master/activesupport/lib/active_support/inflections.rb)
+
+If you want to register more rules, follow these examples:
+
+```go
+inflection.AddUncountable("fish")
+inflection.AddIrregular("person", "people")
+inflection.AddPlural("(bu)s$", "${1}ses") // "bus" => "buses" / "BUS" => "BUSES" / "Bus" => "Buses"
+inflection.AddSingular("(bus)(es)?$", "${1}") // "buses" => "bus" / "Buses" => "Bus" / "BUSES" => "BUS"
+```
+
+## Contributing
+
+You can help make the project better; check out [http://gorm.io/contribute.html](http://gorm.io/contribute.html) for things you can do.
+
+## Author
+
+**jinzhu**
+
+## License
+
+Released under the [MIT License](http://www.opensource.org/licenses/MIT).
diff --git a/vendor/github.com/jinzhu/inflection/go.mod b/vendor/github.com/jinzhu/inflection/go.mod
new file mode 100644
index 0000000..2fb7690
--- /dev/null
+++ b/vendor/github.com/jinzhu/inflection/go.mod
@@ -0,0 +1 @@
+module github.com/jinzhu/inflection
diff --git a/vendor/github.com/jinzhu/inflection/inflections.go b/vendor/github.com/jinzhu/inflection/inflections.go
new file mode 100644
index 0000000..606263b
--- /dev/null
+++ b/vendor/github.com/jinzhu/inflection/inflections.go
@@ -0,0 +1,273 @@
+/*
+Package inflection pluralizes and singularizes English nouns.
+
+ inflection.Plural("person") => "people"
+ inflection.Plural("Person") => "People"
+ inflection.Plural("PERSON") => "PEOPLE"
+
+ inflection.Singular("people") => "person"
+ inflection.Singular("People") => "Person"
+ inflection.Singular("PEOPLE") => "PERSON"
+
+ inflection.Plural("FancyPerson") => "FancydPeople"
+ inflection.Singular("FancyPeople") => "FancydPerson"
+
+Standard rules are from Rails's ActiveSupport (https://github.com/rails/rails/blob/master/activesupport/lib/active_support/inflections.rb)
+
+If you want to register more rules, follow these examples:
+
+ inflection.AddUncountable("fish")
+ inflection.AddIrregular("person", "people")
+ inflection.AddPlural("(bu)s$", "${1}ses") // "bus" => "buses" / "BUS" => "BUSES" / "Bus" => "Buses"
+ inflection.AddSingular("(bus)(es)?$", "${1}") // "buses" => "bus" / "Buses" => "Bus" / "BUSES" => "BUS"
+*/
+package inflection
+
+import (
+ "regexp"
+ "strings"
+)
+
+type inflection struct {
+ regexp *regexp.Regexp
+ replace string
+}
+
+// Regular is a regexp find replace inflection
+type Regular struct {
+ find string
+ replace string
+}
+
+// Irregular is a hard replace inflection,
+// containing both singular and plural forms
+type Irregular struct {
+ singular string
+ plural string
+}
+
+// RegularSlice is a slice of Regular inflections
+type RegularSlice []Regular
+
+// IrregularSlice is a slice of Irregular inflections
+type IrregularSlice []Irregular
+
+var pluralInflections = RegularSlice{
+ {"([a-z])$", "${1}s"},
+ {"s$", "s"},
+ {"^(ax|test)is$", "${1}es"},
+ {"(octop|vir)us$", "${1}i"},
+ {"(octop|vir)i$", "${1}i"},
+ {"(alias|status)$", "${1}es"},
+ {"(bu)s$", "${1}ses"},
+ {"(buffal|tomat)o$", "${1}oes"},
+ {"([ti])um$", "${1}a"},
+ {"([ti])a$", "${1}a"},
+ {"sis$", "ses"},
+ {"(?:([^f])fe|([lr])f)$", "${1}${2}ves"},
+ {"(hive)$", "${1}s"},
+ {"([^aeiouy]|qu)y$", "${1}ies"},
+ {"(x|ch|ss|sh)$", "${1}es"},
+ {"(matr|vert|ind)(?:ix|ex)$", "${1}ices"},
+ {"^(m|l)ouse$", "${1}ice"},
+ {"^(m|l)ice$", "${1}ice"},
+ {"^(ox)$", "${1}en"},
+ {"^(oxen)$", "${1}"},
+ {"(quiz)$", "${1}zes"},
+}
+
+var singularInflections = RegularSlice{
+ {"s$", ""},
+ {"(ss)$", "${1}"},
+ {"(n)ews$", "${1}ews"},
+ {"([ti])a$", "${1}um"},
+ {"((a)naly|(b)a|(d)iagno|(p)arenthe|(p)rogno|(s)ynop|(t)he)(sis|ses)$", "${1}sis"},
+ {"(^analy)(sis|ses)$", "${1}sis"},
+ {"([^f])ves$", "${1}fe"},
+ {"(hive)s$", "${1}"},
+ {"(tive)s$", "${1}"},
+ {"([lr])ves$", "${1}f"},
+ {"([^aeiouy]|qu)ies$", "${1}y"},
+ {"(s)eries$", "${1}eries"},
+ {"(m)ovies$", "${1}ovie"},
+ {"(c)ookies$", "${1}ookie"},
+ {"(x|ch|ss|sh)es$", "${1}"},
+ {"^(m|l)ice$", "${1}ouse"},
+ {"(bus)(es)?$", "${1}"},
+ {"(o)es$", "${1}"},
+ {"(shoe)s$", "${1}"},
+ {"(cris|test)(is|es)$", "${1}is"},
+ {"^(a)x[ie]s$", "${1}xis"},
+ {"(octop|vir)(us|i)$", "${1}us"},
+ {"(alias|status)(es)?$", "${1}"},
+ {"^(ox)en", "${1}"},
+ {"(vert|ind)ices$", "${1}ex"},
+ {"(matr)ices$", "${1}ix"},
+ {"(quiz)zes$", "${1}"},
+ {"(database)s$", "${1}"},
+}
+
+var irregularInflections = IrregularSlice{
+ {"person", "people"},
+ {"man", "men"},
+ {"child", "children"},
+ {"sex", "sexes"},
+ {"move", "moves"},
+ {"mombie", "mombies"},
+}
+
+var uncountableInflections = []string{"equipment", "information", "rice", "money", "species", "series", "fish", "sheep", "jeans", "police"}
+
+var compiledPluralMaps []inflection
+var compiledSingularMaps []inflection
+
+func compile() {
+ compiledPluralMaps = []inflection{}
+ compiledSingularMaps = []inflection{}
+ for _, uncountable := range uncountableInflections {
+ inf := inflection{
+ regexp: regexp.MustCompile("^(?i)(" + uncountable + ")$"),
+ replace: "${1}",
+ }
+ compiledPluralMaps = append(compiledPluralMaps, inf)
+ compiledSingularMaps = append(compiledSingularMaps, inf)
+ }
+
+ for _, value := range irregularInflections {
+ infs := []inflection{
+ inflection{regexp: regexp.MustCompile(strings.ToUpper(value.singular) + "$"), replace: strings.ToUpper(value.plural)},
+ inflection{regexp: regexp.MustCompile(strings.Title(value.singular) + "$"), replace: strings.Title(value.plural)},
+ inflection{regexp: regexp.MustCompile(value.singular + "$"), replace: value.plural},
+ }
+ compiledPluralMaps = append(compiledPluralMaps, infs...)
+ }
+
+ for _, value := range irregularInflections {
+ infs := []inflection{
+ inflection{regexp: regexp.MustCompile(strings.ToUpper(value.plural) + "$"), replace: strings.ToUpper(value.singular)},
+ inflection{regexp: regexp.MustCompile(strings.Title(value.plural) + "$"), replace: strings.Title(value.singular)},
+ inflection{regexp: regexp.MustCompile(value.plural + "$"), replace: value.singular},
+ }
+ compiledSingularMaps = append(compiledSingularMaps, infs...)
+ }
+
+ for i := len(pluralInflections) - 1; i >= 0; i-- {
+ value := pluralInflections[i]
+ infs := []inflection{
+ inflection{regexp: regexp.MustCompile(strings.ToUpper(value.find)), replace: strings.ToUpper(value.replace)},
+ inflection{regexp: regexp.MustCompile(value.find), replace: value.replace},
+ inflection{regexp: regexp.MustCompile("(?i)" + value.find), replace: value.replace},
+ }
+ compiledPluralMaps = append(compiledPluralMaps, infs...)
+ }
+
+ for i := len(singularInflections) - 1; i >= 0; i-- {
+ value := singularInflections[i]
+ infs := []inflection{
+ inflection{regexp: regexp.MustCompile(strings.ToUpper(value.find)), replace: strings.ToUpper(value.replace)},
+ inflection{regexp: regexp.MustCompile(value.find), replace: value.replace},
+ inflection{regexp: regexp.MustCompile("(?i)" + value.find), replace: value.replace},
+ }
+ compiledSingularMaps = append(compiledSingularMaps, infs...)
+ }
+}
+
+func init() {
+ compile()
+}
+
+// AddPlural adds a plural inflection
+func AddPlural(find, replace string) {
+ pluralInflections = append(pluralInflections, Regular{find, replace})
+ compile()
+}
+
+// AddSingular adds a singular inflection
+func AddSingular(find, replace string) {
+ singularInflections = append(singularInflections, Regular{find, replace})
+ compile()
+}
+
+// AddIrregular adds an irregular inflection
+func AddIrregular(singular, plural string) {
+ irregularInflections = append(irregularInflections, Irregular{singular, plural})
+ compile()
+}
+
+// AddUncountable adds an uncountable inflection
+func AddUncountable(values ...string) {
+ uncountableInflections = append(uncountableInflections, values...)
+ compile()
+}
+
+// GetPlural retrieves the plural inflection values
+func GetPlural() RegularSlice {
+ plurals := make(RegularSlice, len(pluralInflections))
+ copy(plurals, pluralInflections)
+ return plurals
+}
+
+// GetSingular retrieves the singular inflection values
+func GetSingular() RegularSlice {
+ singulars := make(RegularSlice, len(singularInflections))
+ copy(singulars, singularInflections)
+ return singulars
+}
+
+// GetIrregular retrieves the irregular inflection values
+func GetIrregular() IrregularSlice {
+ irregular := make(IrregularSlice, len(irregularInflections))
+ copy(irregular, irregularInflections)
+ return irregular
+}
+
+// GetUncountable retrieves the uncountable inflection values
+func GetUncountable() []string {
+ uncountables := make([]string, len(uncountableInflections))
+ copy(uncountables, uncountableInflections)
+ return uncountables
+}
+
+// SetPlural sets the plural inflections slice
+func SetPlural(inflections RegularSlice) {
+ pluralInflections = inflections
+ compile()
+}
+
+// SetSingular sets the singular inflections slice
+func SetSingular(inflections RegularSlice) {
+ singularInflections = inflections
+ compile()
+}
+
+// SetIrregular sets the irregular inflections slice
+func SetIrregular(inflections IrregularSlice) {
+ irregularInflections = inflections
+ compile()
+}
+
+// SetUncountable sets the uncountable inflections slice
+func SetUncountable(inflections []string) {
+ uncountableInflections = inflections
+ compile()
+}
+
+// Plural converts a word to its plural form
+func Plural(str string) string {
+ for _, inflection := range compiledPluralMaps {
+ if inflection.regexp.MatchString(str) {
+ return inflection.regexp.ReplaceAllString(str, inflection.replace)
+ }
+ }
+ return str
+}
+
+// Singular converts a word to its singular form
+func Singular(str string) string {
+ for _, inflection := range compiledSingularMaps {
+ if inflection.regexp.MatchString(str) {
+ return inflection.regexp.ReplaceAllString(str, inflection.replace)
+ }
+ }
+ return str
+}
diff --git a/vendor/github.com/jinzhu/inflection/wercker.yml b/vendor/github.com/jinzhu/inflection/wercker.yml
new file mode 100644
index 0000000..5e6ce98
--- /dev/null
+++ b/vendor/github.com/jinzhu/inflection/wercker.yml
@@ -0,0 +1,23 @@
+box: golang
+
+build:
+ steps:
+ - setup-go-workspace
+
+ # Gets the dependencies
+ - script:
+ name: go get
+ code: |
+ go get
+
+ # Build the project
+ - script:
+ name: go build
+ code: |
+ go build ./...
+
+ # Test the project
+ - script:
+ name: go test
+ code: |
+ go test ./...
diff --git a/vendor/github.com/jinzhu/now/Guardfile b/vendor/github.com/jinzhu/now/Guardfile
new file mode 100644
index 0000000..0b860b0
--- /dev/null
+++ b/vendor/github.com/jinzhu/now/Guardfile
@@ -0,0 +1,3 @@
+guard 'gotest' do
+ watch(%r{\.go$})
+end
diff --git a/vendor/github.com/jinzhu/now/License b/vendor/github.com/jinzhu/now/License
new file mode 100644
index 0000000..037e165
--- /dev/null
+++ b/vendor/github.com/jinzhu/now/License
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013-NOW Jinzhu
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/jinzhu/now/README.md b/vendor/github.com/jinzhu/now/README.md
new file mode 100644
index 0000000..a1fc4da
--- /dev/null
+++ b/vendor/github.com/jinzhu/now/README.md
@@ -0,0 +1,133 @@
+## Now
+
+Now is a time toolkit for Go.
+
+[![go report card](https://goreportcard.com/badge/github.com/jinzhu/now "go report card")](https://goreportcard.com/report/github.com/jinzhu/now)
+[![test status](https://github.com/jinzhu/now/workflows/tests/badge.svg?branch=master "test status")](https://github.com/jinzhu/now/actions)
+[![MIT license](https://img.shields.io/badge/license-MIT-brightgreen.svg)](https://opensource.org/licenses/MIT)
+
+## Install
+
+```
+go get -u github.com/jinzhu/now
+```
+
+## Usage
+
+Calculating time based on current time
+
+```go
+import "github.com/jinzhu/now"
+
+time.Now() // 2013-11-18 17:51:49.123456789 Mon
+
+now.BeginningOfMinute() // 2013-11-18 17:51:00 Mon
+now.BeginningOfHour() // 2013-11-18 17:00:00 Mon
+now.BeginningOfDay() // 2013-11-18 00:00:00 Mon
+now.BeginningOfWeek() // 2013-11-17 00:00:00 Sun
+now.BeginningOfMonth() // 2013-11-01 00:00:00 Fri
+now.BeginningOfQuarter() // 2013-10-01 00:00:00 Tue
+now.BeginningOfYear() // 2013-01-01 00:00:00 Tue
+
+now.EndOfMinute() // 2013-11-18 17:51:59.999999999 Mon
+now.EndOfHour() // 2013-11-18 17:59:59.999999999 Mon
+now.EndOfDay() // 2013-11-18 23:59:59.999999999 Mon
+now.EndOfWeek() // 2013-11-23 23:59:59.999999999 Sat
+now.EndOfMonth() // 2013-11-30 23:59:59.999999999 Sat
+now.EndOfQuarter() // 2013-12-31 23:59:59.999999999 Tue
+now.EndOfYear() // 2013-12-31 23:59:59.999999999 Tue
+
+now.WeekStartDay = time.Monday // Set Monday as first day, default is Sunday
+now.EndOfWeek() // 2013-11-24 23:59:59.999999999 Sun
+```
+
+Calculating time based on another time
+
+```go
+t := time.Date(2013, 02, 18, 17, 51, 49, 123456789, time.Now().Location())
+now.With(t).EndOfMonth() // 2013-02-28 23:59:59.999999999 Thu
+```
+
+Calculating time based on configuration
+
+```go
+location, err := time.LoadLocation("Asia/Shanghai")
+
+myConfig := &now.Config{
+ WeekStartDay: time.Monday,
+ TimeLocation: location,
+ TimeFormats: []string{"2006-01-02 15:04:05"},
+}
+
+t := time.Date(2013, 11, 18, 17, 51, 49, 123456789, time.Now().Location()) // 2013-11-18 17:51:49.123456789 Mon
+myConfig.With(t).BeginningOfWeek() // 2013-11-18 00:00:00 Mon
+
+myConfig.Parse("2002-10-12 22:14:01") // 2002-10-12 22:14:01
+myConfig.Parse("2002-10-12 22:14") // returns error 'can't parse string as time: 2002-10-12 22:14'
+```
+
+### Monday/Sunday
+
+If you don't want to bother with the `WeekStartDay` setting, you can use `Monday` and `Sunday` directly:
+
+```go
+now.Monday() // 2013-11-18 00:00:00 Mon
+now.Sunday() // 2013-11-24 00:00:00 Sun (Next Sunday)
+now.EndOfSunday() // 2013-11-24 23:59:59.999999999 Sun (End of next Sunday)
+
+t := time.Date(2013, 11, 24, 17, 51, 49, 123456789, time.Now().Location()) // 2013-11-24 17:51:49.123456789 Sun
+now.With(t).Monday() // 2013-11-18 00:00:00 Mon (last Monday, since today is Sunday)
+now.With(t).Sunday() // 2013-11-24 00:00:00 Sun (Beginning Of Today if today is Sunday)
+now.With(t).EndOfSunday() // 2013-11-24 23:59:59.999999999 Sun (End of Today if today is Sunday)
+```
+
+### Parse String to Time
+
+```go
+time.Now() // 2013-11-18 17:51:49.123456789 Mon
+
+// Parse(string) (time.Time, error)
+t, err := now.Parse("2017") // 2017-01-01 00:00:00, nil
+t, err := now.Parse("2017-10") // 2017-10-01 00:00:00, nil
+t, err := now.Parse("2017-10-13") // 2017-10-13 00:00:00, nil
+t, err := now.Parse("1999-12-12 12") // 1999-12-12 12:00:00, nil
+t, err := now.Parse("1999-12-12 12:20") // 1999-12-12 12:20:00, nil
+t, err := now.Parse("1999-12-12 12:20:21") // 1999-12-12 12:20:21, nil
+t, err := now.Parse("10-13") // 2013-10-13 00:00:00, nil
+t, err := now.Parse("12:20") // 2013-11-18 12:20:00, nil
+t, err := now.Parse("12:20:13") // 2013-11-18 12:20:13, nil
+t, err := now.Parse("14") // 2013-11-18 14:00:00, nil
+t, err := now.Parse("99:99") // 2013-11-18 12:20:00, Can't parse string as time: 99:99
+
+// MustParse must parse string to time or it will panic
+now.MustParse("2013-01-13") // 2013-01-13 00:00:00
+now.MustParse("02-17") // 2013-02-17 00:00:00
+now.MustParse("2-17") // 2013-02-17 00:00:00
+now.MustParse("8") // 2013-11-18 08:00:00
+now.MustParse("2002-10-12 22:14") // 2002-10-12 22:14:00
+now.MustParse("99:99") // panic: Can't parse string as time: 99:99
+```
+
+Extending `now` to support more formats is easy; just append other time layouts to `now.TimeFormats`, e.g.:
+
+```go
+now.TimeFormats = append(now.TimeFormats, "02 Jan 2006 15:04")
+```
+
+Please send a pull request if you want a format to be supported officially.
+
+## Contributing
+
+You can help make the project better; check out [http://gorm.io/contribute.html](http://gorm.io/contribute.html) for things you can do.
+
+## Author
+
+**jinzhu**
+
+## License
+
+Released under the [MIT License](http://www.opensource.org/licenses/MIT).
diff --git a/vendor/github.com/jinzhu/now/go.mod b/vendor/github.com/jinzhu/now/go.mod
new file mode 100644
index 0000000..018d266
--- /dev/null
+++ b/vendor/github.com/jinzhu/now/go.mod
@@ -0,0 +1,3 @@
+module github.com/jinzhu/now
+
+go 1.12
diff --git a/vendor/github.com/jinzhu/now/main.go b/vendor/github.com/jinzhu/now/main.go
new file mode 100644
index 0000000..e972cd8
--- /dev/null
+++ b/vendor/github.com/jinzhu/now/main.go
@@ -0,0 +1,199 @@
+// Package now is a time toolkit for Go.
+//
+// More details README here: https://github.com/jinzhu/now
+//
+// import "github.com/jinzhu/now"
+//
+// now.BeginningOfMinute() // 2013-11-18 17:51:00 Mon
+// now.BeginningOfDay() // 2013-11-18 00:00:00 Mon
+// now.EndOfDay() // 2013-11-18 23:59:59.999999999 Mon
+package now
+
+import "time"
+
+// WeekStartDay is the first day of the week; the default is time.Sunday
+var WeekStartDay = time.Sunday
+
+// TimeFormats lists the default time layouts tried when parsing
+var TimeFormats = []string{
+ "2006", "2006-1", "2006-1-2", "2006-1-2 15", "2006-1-2 15:4", "2006-1-2 15:4:5", "1-2",
+ "15:4:5", "15:4", "15",
+ "15:4:5 Jan 2, 2006 MST", "2006-01-02 15:04:05.999999999 -0700 MST", "2006-01-02T15:04:05Z0700", "2006-01-02T15:04:05Z07",
+ "2006.1.2", "2006.1.2 15:04:05", "2006.01.02", "2006.01.02 15:04:05", "2006.01.02 15:04:05.999999999",
+ "1/2/2006", "1/2/2006 15:4:5", "2006/01/02", "20060102", "2006/01/02 15:04:05",
+ time.ANSIC, time.UnixDate, time.RubyDate, time.RFC822, time.RFC822Z, time.RFC850,
+ time.RFC1123, time.RFC1123Z, time.RFC3339, time.RFC3339Nano,
+ time.Kitchen, time.Stamp, time.StampMilli, time.StampMicro, time.StampNano,
+}
+
+// Config carries configuration for the now package
+type Config struct {
+ WeekStartDay time.Weekday
+ TimeLocation *time.Location
+ TimeFormats []string
+}
+
+// DefaultConfig is the default config used by With and New; when nil, the
+// package-level WeekStartDay and TimeFormats are used
+var DefaultConfig *Config
+
+// With initializes a Now with the given time based on this configuration
+func (config *Config) With(t time.Time) *Now {
+ return &Now{Time: t, Config: config}
+}
+
+// Parse parses strings to a time based on the configuration
+func (config *Config) Parse(strs ...string) (time.Time, error) {
+ if config.TimeLocation == nil {
+ return config.With(time.Now()).Parse(strs...)
+ } else {
+ return config.With(time.Now().In(config.TimeLocation)).Parse(strs...)
+ }
+}
+
+// MustParse parses strings to a time based on the configuration, panicking on failure
+func (config *Config) MustParse(strs ...string) time.Time {
+ if config.TimeLocation == nil {
+ return config.With(time.Now()).MustParse(strs...)
+ } else {
+ return config.With(time.Now().In(config.TimeLocation)).MustParse(strs...)
+ }
+}
+
+// Now wraps a time.Time together with its parsing configuration
+type Now struct {
+ time.Time
+ *Config
+}
+
+// With initializes a Now with the given time
+func With(t time.Time) *Now {
+ config := DefaultConfig
+ if config == nil {
+ config = &Config{
+ WeekStartDay: WeekStartDay,
+ TimeFormats: TimeFormats,
+ }
+ }
+
+ return &Now{Time: t, Config: config}
+}
+
+// New initializes a Now with the given time; it is equivalent to With
+func New(t time.Time) *Now {
+ return With(t)
+}
+
+// BeginningOfMinute beginning of minute
+func BeginningOfMinute() time.Time {
+ return With(time.Now()).BeginningOfMinute()
+}
+
+// BeginningOfHour beginning of hour
+func BeginningOfHour() time.Time {
+ return With(time.Now()).BeginningOfHour()
+}
+
+// BeginningOfDay beginning of day
+func BeginningOfDay() time.Time {
+ return With(time.Now()).BeginningOfDay()
+}
+
+// BeginningOfWeek beginning of week
+func BeginningOfWeek() time.Time {
+ return With(time.Now()).BeginningOfWeek()
+}
+
+// BeginningOfMonth beginning of month
+func BeginningOfMonth() time.Time {
+ return With(time.Now()).BeginningOfMonth()
+}
+
+// BeginningOfQuarter beginning of quarter
+func BeginningOfQuarter() time.Time {
+ return With(time.Now()).BeginningOfQuarter()
+}
+
+// BeginningOfYear beginning of year
+func BeginningOfYear() time.Time {
+ return With(time.Now()).BeginningOfYear()
+}
+
+// EndOfMinute end of minute
+func EndOfMinute() time.Time {
+ return With(time.Now()).EndOfMinute()
+}
+
+// EndOfHour end of hour
+func EndOfHour() time.Time {
+ return With(time.Now()).EndOfHour()
+}
+
+// EndOfDay end of day
+func EndOfDay() time.Time {
+ return With(time.Now()).EndOfDay()
+}
+
+// EndOfWeek end of week
+func EndOfWeek() time.Time {
+ return With(time.Now()).EndOfWeek()
+}
+
+// EndOfMonth end of month
+func EndOfMonth() time.Time {
+ return With(time.Now()).EndOfMonth()
+}
+
+// EndOfQuarter end of quarter
+func EndOfQuarter() time.Time {
+ return With(time.Now()).EndOfQuarter()
+}
+
+// EndOfYear end of year
+func EndOfYear() time.Time {
+ return With(time.Now()).EndOfYear()
+}
+
+// Monday returns the beginning of day of the most recent Monday
+func Monday() time.Time {
+ return With(time.Now()).Monday()
+}
+
+// Sunday returns the beginning of day of the coming Sunday (today if it is Sunday)
+func Sunday() time.Time {
+ return With(time.Now()).Sunday()
+}
+
+// EndOfSunday returns the end of day of the coming Sunday
+func EndOfSunday() time.Time {
+ return With(time.Now()).EndOfSunday()
+}
+
+// Quarter returns the yearly quarter
+func Quarter() uint {
+ return With(time.Now()).Quarter()
+}
+
+// Parse parses strings to a time
+func Parse(strs ...string) (time.Time, error) {
+ return With(time.Now()).Parse(strs...)
+}
+
+// ParseInLocation parses strings to a time in the given location
+func ParseInLocation(loc *time.Location, strs ...string) (time.Time, error) {
+ return With(time.Now().In(loc)).Parse(strs...)
+}
+
+// MustParse parses strings to a time, panicking on failure
+func MustParse(strs ...string) time.Time {
+ return With(time.Now()).MustParse(strs...)
+}
+
+// MustParseInLocation parses strings to a time in the given location, panicking on failure
+func MustParseInLocation(loc *time.Location, strs ...string) time.Time {
+ return With(time.Now().In(loc)).MustParse(strs...)
+}
+
+// Between reports whether the current time falls between the begin and end times.
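+//
+// For example, Between("03:00", "04:00") reports whether the current clock
+// time falls within that window today.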
+func Between(time1, time2 string) bool {
+ return With(time.Now()).Between(time1, time2)
+}
diff --git a/vendor/github.com/jinzhu/now/now.go b/vendor/github.com/jinzhu/now/now.go
new file mode 100644
index 0000000..d1fb8ae
--- /dev/null
+++ b/vendor/github.com/jinzhu/now/now.go
@@ -0,0 +1,217 @@
+package now
+
+import (
+ "errors"
+ "regexp"
+ "time"
+)
+
+// BeginningOfMinute beginning of minute
+func (now *Now) BeginningOfMinute() time.Time {
+ return now.Truncate(time.Minute)
+}
+
+// BeginningOfHour beginning of hour
+func (now *Now) BeginningOfHour() time.Time {
+ y, m, d := now.Date()
+ return time.Date(y, m, d, now.Time.Hour(), 0, 0, 0, now.Time.Location())
+}
+
+// BeginningOfDay beginning of day
+func (now *Now) BeginningOfDay() time.Time {
+ y, m, d := now.Date()
+ return time.Date(y, m, d, 0, 0, 0, 0, now.Time.Location())
+}
+
+// BeginningOfWeek beginning of week
+func (now *Now) BeginningOfWeek() time.Time {
+ t := now.BeginningOfDay()
+ weekday := int(t.Weekday())
+
+ if now.WeekStartDay != time.Sunday {
+ weekStartDayInt := int(now.WeekStartDay)
+
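+		// e.g. with WeekStartDay == Monday, Sunday (weekday 0) becomes
+		// 0+7-1 = 6, stepping back six days to the previous Monday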
+ if weekday < weekStartDayInt {
+ weekday = weekday + 7 - weekStartDayInt
+ } else {
+ weekday = weekday - weekStartDayInt
+ }
+ }
+ return t.AddDate(0, 0, -weekday)
+}
+
+// BeginningOfMonth beginning of month
+func (now *Now) BeginningOfMonth() time.Time {
+ y, m, _ := now.Date()
+ return time.Date(y, m, 1, 0, 0, 0, 0, now.Location())
+}
+
+// BeginningOfQuarter beginning of quarter
+func (now *Now) BeginningOfQuarter() time.Time {
+ month := now.BeginningOfMonth()
+ offset := (int(month.Month()) - 1) % 3
+ return month.AddDate(0, -offset, 0)
+}
+
+// BeginningOfHalf beginning of half year
+func (now *Now) BeginningOfHalf() time.Time {
+ month := now.BeginningOfMonth()
+ offset := (int(month.Month()) - 1) % 6
+ return month.AddDate(0, -offset, 0)
+}
+
+// BeginningOfYear beginning of year
+func (now *Now) BeginningOfYear() time.Time {
+ y, _, _ := now.Date()
+ return time.Date(y, time.January, 1, 0, 0, 0, 0, now.Location())
+}
+
+// EndOfMinute end of minute
+func (now *Now) EndOfMinute() time.Time {
+ return now.BeginningOfMinute().Add(time.Minute - time.Nanosecond)
+}
+
+// EndOfHour end of hour
+func (now *Now) EndOfHour() time.Time {
+ return now.BeginningOfHour().Add(time.Hour - time.Nanosecond)
+}
+
+// EndOfDay end of day
+func (now *Now) EndOfDay() time.Time {
+ y, m, d := now.Date()
+ return time.Date(y, m, d, 23, 59, 59, int(time.Second-time.Nanosecond), now.Location())
+}
+
+// EndOfWeek end of week
+func (now *Now) EndOfWeek() time.Time {
+ return now.BeginningOfWeek().AddDate(0, 0, 7).Add(-time.Nanosecond)
+}
+
+// EndOfMonth end of month
+func (now *Now) EndOfMonth() time.Time {
+ return now.BeginningOfMonth().AddDate(0, 1, 0).Add(-time.Nanosecond)
+}
+
+// EndOfQuarter end of quarter
+func (now *Now) EndOfQuarter() time.Time {
+ return now.BeginningOfQuarter().AddDate(0, 3, 0).Add(-time.Nanosecond)
+}
+
+// EndOfHalf end of half year
+func (now *Now) EndOfHalf() time.Time {
+ return now.BeginningOfHalf().AddDate(0, 6, 0).Add(-time.Nanosecond)
+}
+
+// EndOfYear end of year
+func (now *Now) EndOfYear() time.Time {
+ return now.BeginningOfYear().AddDate(1, 0, 0).Add(-time.Nanosecond)
+}
+
+// Monday returns the beginning of day of the most recent Monday
+func (now *Now) Monday() time.Time {
+ t := now.BeginningOfDay()
+ weekday := int(t.Weekday())
+ if weekday == 0 {
+ weekday = 7
+ }
+ return t.AddDate(0, 0, -weekday+1)
+}
+
+// Sunday returns the beginning of day of the coming Sunday (today if it is Sunday)
+func (now *Now) Sunday() time.Time {
+ t := now.BeginningOfDay()
+ weekday := int(t.Weekday())
+ if weekday == 0 {
+ return t
+ }
+ return t.AddDate(0, 0, (7 - weekday))
+}
+
+// EndOfSunday returns the end of day of the coming Sunday
+func (now *Now) EndOfSunday() time.Time {
+ return New(now.Sunday()).EndOfDay()
+}
+
+// Quarter returns the yearly quarter
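+// For example, November (month 11) falls in quarter (11-1)/3+1 = 4.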
+func (now *Now) Quarter() uint {
+ return (uint(now.Month())-1)/3 + 1
+}
+
+func (now *Now) parseWithFormat(str string, location *time.Location) (t time.Time, err error) {
+ for _, format := range now.TimeFormats {
+ t, err = time.ParseInLocation(format, str, location)
+
+ if err == nil {
+ return
+ }
+ }
+ err = errors.New("Can't parse string as time: " + str)
+ return
+}
+
+var hasTimeRegexp = regexp.MustCompile(`(\s+|^\s*|T)\d{1,2}((:\d{1,2})*|((:\d{1,2}){2}\.(\d{3}|\d{6}|\d{9})))(\s*$|[Z+-])`) // match 15:04:05, 15:04:05.000, 15:04:05.000000, 15, 2017-01-01 15:04, 2021-07-20T00:59:10Z, 2021-07-20T00:59:10+08:00, 2021-07-20T00:00:10-07:00, etc.
+var onlyTimeRegexp = regexp.MustCompile(`^\s*\d{1,2}((:\d{1,2})*|((:\d{1,2}){2}\.(\d{3}|\d{6}|\d{9})))\s*$`) // match 15:04:05, 15, 15:04:05.000, 15:04:05.000000, etc
+
+// Parse parses strings to a time
+func (now *Now) Parse(strs ...string) (t time.Time, err error) {
+ var (
+ setCurrentTime bool
+ parseTime []int
+ currentLocation = now.Location()
+ onlyTimeInStr = true
+ currentTime = formatTimeToList(now.Time)
+ )
+
+ for _, str := range strs {
+ hasTimeInStr := hasTimeRegexp.MatchString(str) // match 15:04:05, 15
+ onlyTimeInStr = hasTimeInStr && onlyTimeInStr && onlyTimeRegexp.MatchString(str)
+ if t, err = now.parseWithFormat(str, currentLocation); err == nil {
+ location := t.Location()
+ parseTime = formatTimeToList(t)
+
+ for i, v := range parseTime {
+			// don't reset the nanosecond/second/minute/hour fields if the string itself includes a time
+ if hasTimeInStr && i <= 3 {
+ continue
+ }
+
+ // If value is zero, replace it with current time
+ if v == 0 {
+ if setCurrentTime {
+ parseTime[i] = currentTime[i]
+ }
+ } else {
+ setCurrentTime = true
+ }
+
+			// if the string contains only a time, take the day and month from the current time
+ if onlyTimeInStr {
+ if i == 4 || i == 5 {
+ parseTime[i] = currentTime[i]
+ continue
+ }
+ }
+ }
+
+ t = time.Date(parseTime[6], time.Month(parseTime[5]), parseTime[4], parseTime[3], parseTime[2], parseTime[1], parseTime[0], location)
+ currentTime = formatTimeToList(t)
+ }
+ }
+ return
+}
+
+// MustParse parses strings to a time, panicking on failure
+func (now *Now) MustParse(strs ...string) (t time.Time) {
+ t, err := now.Parse(strs...)
+ if err != nil {
+ panic(err)
+ }
+ return t
+}
+
+// Between reports whether the time falls between the begin and end times
+func (now *Now) Between(begin, end string) bool {
+ beginTime := now.MustParse(begin)
+ endTime := now.MustParse(end)
+ return now.After(beginTime) && now.Before(endTime)
+}
diff --git a/vendor/github.com/jinzhu/now/time.go b/vendor/github.com/jinzhu/now/time.go
new file mode 100644
index 0000000..52dd8b2
--- /dev/null
+++ b/vendor/github.com/jinzhu/now/time.go
@@ -0,0 +1,9 @@
+package now
+
+import "time"
+
+func formatTimeToList(t time.Time) []int {
+ hour, min, sec := t.Clock()
+ year, month, day := t.Date()
+ return []int{t.Nanosecond(), sec, min, hour, day, int(month), year}
+}
diff --git a/vendor/github.com/jpillora/backoff/LICENSE b/vendor/github.com/jpillora/backoff/LICENSE
new file mode 100644
index 0000000..1cc7080
--- /dev/null
+++ b/vendor/github.com/jpillora/backoff/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2017 Jaime Pillora
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/jpillora/backoff/README.md b/vendor/github.com/jpillora/backoff/README.md
new file mode 100644
index 0000000..ee4d623
--- /dev/null
+++ b/vendor/github.com/jpillora/backoff/README.md
@@ -0,0 +1,119 @@
+# Backoff
+
+A simple exponential backoff counter in Go (Golang)
+
+[![GoDoc](https://godoc.org/github.com/jpillora/backoff?status.svg)](https://godoc.org/github.com/jpillora/backoff) [![Circle CI](https://circleci.com/gh/jpillora/backoff.svg?style=shield)](https://circleci.com/gh/jpillora/backoff)
+
+### Install
+
+```
+$ go get -v github.com/jpillora/backoff
+```
+
+### Usage
+
+Backoff is a `time.Duration` counter. It starts at `Min`. After every call to `Duration()` it is multiplied by `Factor`. It is capped at `Max`. It returns to `Min` on every call to `Reset()`. `Jitter` adds randomness ([see below](#example-using-jitter)). It is designed to be used in conjunction with the `time` package.
+
+---
+
+#### Simple example
+
+``` go
+
+b := &backoff.Backoff{
+ //These are the defaults
+ Min: 100 * time.Millisecond,
+ Max: 10 * time.Second,
+ Factor: 2,
+ Jitter: false,
+}
+
+fmt.Printf("%s\n", b.Duration())
+fmt.Printf("%s\n", b.Duration())
+fmt.Printf("%s\n", b.Duration())
+
+fmt.Printf("Reset!\n")
+b.Reset()
+
+fmt.Printf("%s\n", b.Duration())
+```
+
+```
+100ms
+200ms
+400ms
+Reset!
+100ms
+```
+
+---
+
+#### Example using `net` package
+
+``` go
+b := &backoff.Backoff{
+ Max: 5 * time.Minute,
+}
+
+for {
+ conn, err := net.Dial("tcp", "example.com:5309")
+ if err != nil {
+ d := b.Duration()
+ fmt.Printf("%s, reconnecting in %s", err, d)
+ time.Sleep(d)
+ continue
+ }
+ //connected
+ b.Reset()
+ conn.Write([]byte("hello world!"))
+ // ... Read ... Write ... etc
+ conn.Close()
+ //disconnected
+}
+
+```
+
+---
+
+#### Example using `Jitter`
+
+Enabling `Jitter` adds some randomization to the backoff durations. [See Amazon's writeup of performance gains using jitter](http://www.awsarchitectureblog.com/2015/03/backoff.html). Seeding is not necessary but doing so gives repeatable results.
+
+```go
+import "math/rand"
+
+b := &backoff.Backoff{
+ Jitter: true,
+}
+
+rand.Seed(42)
+
+fmt.Printf("%s\n", b.Duration())
+fmt.Printf("%s\n", b.Duration())
+fmt.Printf("%s\n", b.Duration())
+
+fmt.Printf("Reset!\n")
+b.Reset()
+
+fmt.Printf("%s\n", b.Duration())
+fmt.Printf("%s\n", b.Duration())
+fmt.Printf("%s\n", b.Duration())
+```
+
+```
+100ms
+106.600049ms
+281.228155ms
+Reset!
+100ms
+104.381845ms
+214.957989ms
+```
+
+#### Documentation
+
+https://godoc.org/github.com/jpillora/backoff
+
+#### Credits
+
+Forked from [some JavaScript](https://github.com/segmentio/backo) written by [@tj](https://github.com/tj)
diff --git a/vendor/github.com/jpillora/backoff/backoff.go b/vendor/github.com/jpillora/backoff/backoff.go
new file mode 100644
index 0000000..d113e68
--- /dev/null
+++ b/vendor/github.com/jpillora/backoff/backoff.go
@@ -0,0 +1,100 @@
+// Package backoff provides an exponential-backoff implementation.
+package backoff
+
+import (
+ "math"
+ "math/rand"
+ "sync/atomic"
+ "time"
+)
+
+// Backoff is a time.Duration counter, starting at Min. After every call to
+// the Duration method the current timing is multiplied by Factor, but it
+// never exceeds Max.
+//
+// Backoff is not generally concurrent-safe, but the ForAttempt method can
+// be used concurrently.
+type Backoff struct {
+ attempt uint64
+ // Factor is the multiplying factor for each increment step
+ Factor float64
+ // Jitter eases contention by randomizing backoff steps
+ Jitter bool
+ // Min and Max are the minimum and maximum values of the counter
+ Min, Max time.Duration
+}
+
+// Duration returns the duration for the current attempt before incrementing
+// the attempt counter. See ForAttempt.
+func (b *Backoff) Duration() time.Duration {
+ d := b.ForAttempt(float64(atomic.AddUint64(&b.attempt, 1) - 1))
+ return d
+}
+
+const maxInt64 = float64(math.MaxInt64 - 512)
+
+// ForAttempt returns the duration for a specific attempt. This is useful if
+// you have a large number of independent Backoffs, but don't want to use
+// unnecessary memory storing the Backoff parameters per Backoff. The first
+// attempt should be 0.
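+//
+// For example, with the defaults (Min 100ms, Max 10s, Factor 2) and no
+// jitter, ForAttempt(0) returns 100ms, ForAttempt(1) returns 200ms, and
+// ForAttempt(7) is already capped at the 10s maximum.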
+//
+// ForAttempt is concurrent-safe.
+func (b *Backoff) ForAttempt(attempt float64) time.Duration {
+ // Zero-values are nonsensical, so we use
+ // them to apply defaults
+ min := b.Min
+ if min <= 0 {
+ min = 100 * time.Millisecond
+ }
+ max := b.Max
+ if max <= 0 {
+ max = 10 * time.Second
+ }
+ if min >= max {
+ // short-circuit
+ return max
+ }
+ factor := b.Factor
+ if factor <= 0 {
+ factor = 2
+ }
+	// calculate this duration
+ minf := float64(min)
+ durf := minf * math.Pow(factor, attempt)
+ if b.Jitter {
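+		// pick a uniformly random duration in [min, durf)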
+ durf = rand.Float64()*(durf-minf) + minf
+ }
+	// ensure float64 won't overflow int64
+ if durf > maxInt64 {
+ return max
+ }
+ dur := time.Duration(durf)
+	// keep within bounds
+ if dur < min {
+ return min
+ }
+ if dur > max {
+ return max
+ }
+ return dur
+}
+
+// Reset restarts the current attempt counter at zero.
+func (b *Backoff) Reset() {
+ atomic.StoreUint64(&b.attempt, 0)
+}
+
+// Attempt returns the current attempt counter value.
+func (b *Backoff) Attempt() float64 {
+ return float64(atomic.LoadUint64(&b.attempt))
+}
+
+// Copy returns a new Backoff with the same constraints as the original.
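+// The attempt counter is not copied; the returned Backoff starts at attempt zero.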
+func (b *Backoff) Copy() *Backoff {
+ return &Backoff{
+ Factor: b.Factor,
+ Jitter: b.Jitter,
+ Min: b.Min,
+ Max: b.Max,
+ }
+}
diff --git a/vendor/github.com/jpillora/backoff/go.mod b/vendor/github.com/jpillora/backoff/go.mod
new file mode 100644
index 0000000..7c41bc6
--- /dev/null
+++ b/vendor/github.com/jpillora/backoff/go.mod
@@ -0,0 +1,3 @@
+module github.com/jpillora/backoff
+
+go 1.13
diff --git a/vendor/github.com/json-iterator/go/.codecov.yml b/vendor/github.com/json-iterator/go/.codecov.yml
new file mode 100644
index 0000000..955dc0b
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/.codecov.yml
@@ -0,0 +1,3 @@
+ignore:
+ - "output_tests/.*"
+
diff --git a/vendor/github.com/json-iterator/go/.gitignore b/vendor/github.com/json-iterator/go/.gitignore
new file mode 100644
index 0000000..1555653
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/.gitignore
@@ -0,0 +1,4 @@
+/vendor
+/bug_test.go
+/coverage.txt
+/.idea
diff --git a/vendor/github.com/json-iterator/go/.travis.yml b/vendor/github.com/json-iterator/go/.travis.yml
new file mode 100644
index 0000000..449e67c
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+go:
+ - 1.8.x
+ - 1.x
+
+before_install:
+ - go get -t -v ./...
+
+script:
+ - ./test.sh
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/json-iterator/go/Gopkg.lock b/vendor/github.com/json-iterator/go/Gopkg.lock
new file mode 100644
index 0000000..c8a9fbb
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/Gopkg.lock
@@ -0,0 +1,21 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ name = "github.com/modern-go/concurrent"
+ packages = ["."]
+ revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a"
+ version = "1.0.0"
+
+[[projects]]
+ name = "github.com/modern-go/reflect2"
+ packages = ["."]
+ revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
+ version = "1.0.1"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8"
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/vendor/github.com/json-iterator/go/Gopkg.toml b/vendor/github.com/json-iterator/go/Gopkg.toml
new file mode 100644
index 0000000..313a0f8
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/Gopkg.toml
@@ -0,0 +1,26 @@
+# Gopkg.toml example
+#
+# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+# name = "github.com/user/project"
+# version = "1.0.0"
+#
+# [[constraint]]
+# name = "github.com/user/project2"
+# branch = "dev"
+# source = "github.com/myfork/project2"
+#
+# [[override]]
+# name = "github.com/x/y"
+# version = "2.4.0"
+
+ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"]
+
+[[constraint]]
+ name = "github.com/modern-go/reflect2"
+ version = "1.0.1"
diff --git a/vendor/github.com/json-iterator/go/LICENSE b/vendor/github.com/json-iterator/go/LICENSE
new file mode 100644
index 0000000..2cf4f5a
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2016 json-iterator
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md
new file mode 100644
index 0000000..50d56ff
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/README.md
@@ -0,0 +1,87 @@
+[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge)
+[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/json-iterator/go)
+[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go)
+[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go)
+[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go)
+[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE)
+[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)
+
+A high-performance, 100% compatible drop-in replacement for "encoding/json".
+
+You can also process Thrift in the same JSON-like way using [thrift-iterator](https://github.com/thrift-iterator/go).
+
+# Benchmark
+
+![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png)
+
+Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go
+
+Raw Result (easyjson requires static code generation)
+
+| | ns/op | allocation bytes | allocation times |
+| --- | --- | --- | --- |
+| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op |
+| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op |
+| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op |
+| std encode | 2213 ns/op | 712 B/op | 5 allocs/op |
+| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op |
+| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op |
+
+Always benchmark with your own workload.
+The result depends heavily on the data input.
+
+# Usage
+
+100% compatibility with the standard library.
+
+Replace
+
+```go
+import "encoding/json"
+json.Marshal(&data)
+```
+
+with
+
+```go
+import "github.com/json-iterator/go"
+
+var json = jsoniter.ConfigCompatibleWithStandardLibrary
+json.Marshal(&data)
+```
+
+Replace
+
+```go
+import "encoding/json"
+json.Unmarshal(input, &data)
+```
+
+with
+
+```go
+import "github.com/json-iterator/go"
+
+var json = jsoniter.ConfigCompatibleWithStandardLibrary
+json.Unmarshal(input, &data)
+```
+
+[More documentation](http://jsoniter.com/migrate-from-go-std.html)
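+
+Beyond the drop-in API, jsoniter exposes a few convenience helpers defined in this package (a short sketch):
+
+```go
+import "github.com/json-iterator/go"
+
+str, _ := jsoniter.MarshalToString(map[string]int{"a": 1})                          // {"a":1}
+name := jsoniter.Get([]byte(`{"user":{"name":"anna"}}`), "user", "name").ToString() // "anna"
+```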
+
+# How to get
+
+```
+go get github.com/json-iterator/go
+```
+
+# Contributions Welcome!
+
+Contributors
+
+* [thockin](https://github.com/thockin)
+* [mattn](https://github.com/mattn)
+* [cch123](https://github.com/cch123)
+* [Oleg Shaldybin](https://github.com/olegshaldybin)
+* [Jason Toffaletti](https://github.com/toffaletti)
+
+Report an issue or open a pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)
diff --git a/vendor/github.com/json-iterator/go/adapter.go b/vendor/github.com/json-iterator/go/adapter.go
new file mode 100644
index 0000000..92d2cc4
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/adapter.go
@@ -0,0 +1,150 @@
+package jsoniter
+
+import (
+ "bytes"
+ "io"
+)
+
+// RawMessage mirrors json.RawMessage so that jsoniter can replace encoding/json
+type RawMessage []byte
+
+// Unmarshal adapts to the encoding/json Unmarshal API
+//
+// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v.
+// Refer to https://godoc.org/encoding/json#Unmarshal for more information
+func Unmarshal(data []byte, v interface{}) error {
+ return ConfigDefault.Unmarshal(data, v)
+}
+
+// UnmarshalFromString is a convenient method to read from a string instead of []byte
+func UnmarshalFromString(str string, v interface{}) error {
+ return ConfigDefault.UnmarshalFromString(str, v)
+}
+
+// Get is a quick method to get a value from a deeply nested JSON structure.
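+//
+// A short sketch: Get(data, "users", 0, "name") walks object field "users",
+// then array index 0, then field "name"; a '*' path element collects the
+// value from every element of an array.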
+func Get(data []byte, path ...interface{}) Any {
+ return ConfigDefault.Get(data, path...)
+}
+
+// Marshal adapts to the encoding/json Marshal API
+//
+// Marshal returns the JSON encoding of v.
+// Refer to https://godoc.org/encoding/json#Marshal for more information
+func Marshal(v interface{}) ([]byte, error) {
+ return ConfigDefault.Marshal(v)
+}
+
+// MarshalIndent same as json.MarshalIndent. Prefix is not supported.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+ return ConfigDefault.MarshalIndent(v, prefix, indent)
+}
+
+// MarshalToString is a convenient method to write as a string instead of []byte
+func MarshalToString(v interface{}) (string, error) {
+ return ConfigDefault.MarshalToString(v)
+}
+
+// NewDecoder adapts to the encoding/json NewDecoder API.
+//
+// NewDecoder returns a new decoder that reads from r.
+//
+// A jsoniter Decoder is returned instead of an encoding/json Decoder.
+// Refer to https://godoc.org/encoding/json#NewDecoder for more information
+func NewDecoder(reader io.Reader) *Decoder {
+ return ConfigDefault.NewDecoder(reader)
+}
+
+// Decoder reads and decodes JSON values from an input stream.
+// It provides an API identical to encoding/json's Decoder (Token() and UseNumber() are in progress)
+type Decoder struct {
+ iter *Iterator
+}
+
+// Decode decodes the next JSON value into obj
+func (adapter *Decoder) Decode(obj interface{}) error {
+ if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil {
+ if !adapter.iter.loadMore() {
+ return io.EOF
+ }
+ }
+ adapter.iter.ReadVal(obj)
+ err := adapter.iter.Error
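+	// io.EOF after a fully-decoded value is expected, not an error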
+ if err == io.EOF {
+ return nil
+ }
+ return adapter.iter.Error
+}
+
+// More reports whether there is another element in the current array or object being parsed
+func (adapter *Decoder) More() bool {
+ iter := adapter.iter
+ if iter.Error != nil {
+ return false
+ }
+ c := iter.nextToken()
+ if c == 0 {
+ return false
+ }
+ iter.unreadByte()
+ return c != ']' && c != '}'
+}
+
+// Buffered returns a reader over the data remaining in the Decoder's buffer
+func (adapter *Decoder) Buffered() io.Reader {
+ remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail]
+ return bytes.NewReader(remaining)
+}
+
+// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
+// Number instead of as a float64.
+func (adapter *Decoder) UseNumber() {
+ cfg := adapter.iter.cfg.configBeforeFrozen
+ cfg.UseNumber = true
+ adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
+}
+
+// DisallowUnknownFields causes the Decoder to return an error when the destination
+// is a struct and the input contains object keys which do not match any
+// non-ignored, exported fields in the destination.
+func (adapter *Decoder) DisallowUnknownFields() {
+ cfg := adapter.iter.cfg.configBeforeFrozen
+ cfg.DisallowUnknownFields = true
+ adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
+}
+
+// NewEncoder same as json.NewEncoder
+func NewEncoder(writer io.Writer) *Encoder {
+ return ConfigDefault.NewEncoder(writer)
+}
+
+// Encoder same as json.Encoder
+type Encoder struct {
+ stream *Stream
+}
+
+// Encode writes the JSON encoding of val, followed by a newline, to the underlying io.Writer
+func (adapter *Encoder) Encode(val interface{}) error {
+ adapter.stream.WriteVal(val)
+ adapter.stream.WriteRaw("\n")
+ adapter.stream.Flush()
+ return adapter.stream.Error
+}
+
+// SetIndent sets the indentation. Prefix is not supported
+func (adapter *Encoder) SetIndent(prefix, indent string) {
+ config := adapter.stream.cfg.configBeforeFrozen
+ config.IndentionStep = len(indent)
+ adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
+}
+
+// SetEscapeHTML controls HTML escaping (enabled by default); set to false to disable it
+func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) {
+ config := adapter.stream.cfg.configBeforeFrozen
+ config.EscapeHTML = escapeHTML
+ adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
+}
+
+// Valid reports whether data is a valid JSON encoding.
+func Valid(data []byte) bool {
+ return ConfigDefault.Valid(data)
+}
diff --git a/vendor/github.com/json-iterator/go/any.go b/vendor/github.com/json-iterator/go/any.go
new file mode 100644
index 0000000..f6b8aea
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any.go
@@ -0,0 +1,325 @@
+package jsoniter
+
+import (
+ "errors"
+ "fmt"
+ "github.com/modern-go/reflect2"
+ "io"
+ "reflect"
+ "strconv"
+ "unsafe"
+)
+
+// Any generic object representation.
+// The lazy JSON implementation holds []byte and parses lazily.
+type Any interface {
+ LastError() error
+ ValueType() ValueType
+ MustBeValid() Any
+ ToBool() bool
+ ToInt() int
+ ToInt32() int32
+ ToInt64() int64
+ ToUint() uint
+ ToUint32() uint32
+ ToUint64() uint64
+ ToFloat32() float32
+ ToFloat64() float64
+ ToString() string
+ ToVal(val interface{})
+ Get(path ...interface{}) Any
+ Size() int
+ Keys() []string
+ GetInterface() interface{}
+ WriteTo(stream *Stream)
+}
+
+type baseAny struct{}
+
+func (any *baseAny) Get(path ...interface{}) Any {
+ return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)}
+}
+
+func (any *baseAny) Size() int {
+ return 0
+}
+
+func (any *baseAny) Keys() []string {
+ return []string{}
+}
+
+func (any *baseAny) ToVal(obj interface{}) {
+ panic("not implemented")
+}
+
+// WrapInt32 turns an int32 into the Any interface
+func WrapInt32(val int32) Any {
+ return &int32Any{baseAny{}, val}
+}
+
+// WrapInt64 turns an int64 into the Any interface
+func WrapInt64(val int64) Any {
+ return &int64Any{baseAny{}, val}
+}
+
+// WrapUint32 turns a uint32 into the Any interface
+func WrapUint32(val uint32) Any {
+ return &uint32Any{baseAny{}, val}
+}
+
+// WrapUint64 turns a uint64 into the Any interface
+func WrapUint64(val uint64) Any {
+ return &uint64Any{baseAny{}, val}
+}
+
+// WrapFloat64 turns a float64 into the Any interface
+func WrapFloat64(val float64) Any {
+ return &floatAny{baseAny{}, val}
+}
+
+// WrapString turns a string into the Any interface
+func WrapString(val string) Any {
+ return &stringAny{baseAny{}, val}
+}
+
+// Wrap turns a Go object into the Any interface
+func Wrap(val interface{}) Any {
+ if val == nil {
+ return &nilAny{}
+ }
+ asAny, isAny := val.(Any)
+ if isAny {
+ return asAny
+ }
+ typ := reflect2.TypeOf(val)
+ switch typ.Kind() {
+ case reflect.Slice:
+ return wrapArray(val)
+ case reflect.Struct:
+ return wrapStruct(val)
+ case reflect.Map:
+ return wrapMap(val)
+ case reflect.String:
+ return WrapString(val.(string))
+ case reflect.Int:
+ if strconv.IntSize == 32 {
+ return WrapInt32(int32(val.(int)))
+ }
+ return WrapInt64(int64(val.(int)))
+ case reflect.Int8:
+ return WrapInt32(int32(val.(int8)))
+ case reflect.Int16:
+ return WrapInt32(int32(val.(int16)))
+ case reflect.Int32:
+ return WrapInt32(val.(int32))
+ case reflect.Int64:
+ return WrapInt64(val.(int64))
+ case reflect.Uint:
+ if strconv.IntSize == 32 {
+ return WrapUint32(uint32(val.(uint)))
+ }
+ return WrapUint64(uint64(val.(uint)))
+ case reflect.Uintptr:
+ if ptrSize == 32 {
+ return WrapUint32(uint32(val.(uintptr)))
+ }
+ return WrapUint64(uint64(val.(uintptr)))
+ case reflect.Uint8:
+ return WrapUint32(uint32(val.(uint8)))
+ case reflect.Uint16:
+ return WrapUint32(uint32(val.(uint16)))
+ case reflect.Uint32:
+ return WrapUint32(uint32(val.(uint32)))
+ case reflect.Uint64:
+ return WrapUint64(val.(uint64))
+ case reflect.Float32:
+ return WrapFloat64(float64(val.(float32)))
+ case reflect.Float64:
+ return WrapFloat64(val.(float64))
+ case reflect.Bool:
+		if val.(bool) {
+ return &trueAny{}
+ }
+ return &falseAny{}
+ }
+ return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)}
+}
+
+// ReadAny reads the next JSON element as an Any object. It is a better json.RawMessage.
+func (iter *Iterator) ReadAny() Any {
+ return iter.readAny()
+}
+
+func (iter *Iterator) readAny() Any {
+ c := iter.nextToken()
+ switch c {
+ case '"':
+ iter.unreadByte()
+ return &stringAny{baseAny{}, iter.ReadString()}
+ case 'n':
+ iter.skipThreeBytes('u', 'l', 'l') // null
+ return &nilAny{}
+ case 't':
+ iter.skipThreeBytes('r', 'u', 'e') // true
+ return &trueAny{}
+ case 'f':
+ iter.skipFourBytes('a', 'l', 's', 'e') // false
+ return &falseAny{}
+ case '{':
+ return iter.readObjectAny()
+ case '[':
+ return iter.readArrayAny()
+ case '-':
+ return iter.readNumberAny(false)
+ case 0:
+ return &invalidAny{baseAny{}, errors.New("input is empty")}
+ default:
+ return iter.readNumberAny(true)
+ }
+}
+
+func (iter *Iterator) readNumberAny(positive bool) Any {
+ iter.startCapture(iter.head - 1)
+ iter.skipNumber()
+ lazyBuf := iter.stopCapture()
+ return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
+}
+
+func (iter *Iterator) readObjectAny() Any {
+ iter.startCapture(iter.head - 1)
+ iter.skipObject()
+ lazyBuf := iter.stopCapture()
+ return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
+}
+
+func (iter *Iterator) readArrayAny() Any {
+ iter.startCapture(iter.head - 1)
+ iter.skipArray()
+ lazyBuf := iter.stopCapture()
+ return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
+}
+
+func locateObjectField(iter *Iterator, target string) []byte {
+ var found []byte
+ iter.ReadObjectCB(func(iter *Iterator, field string) bool {
+ if field == target {
+ found = iter.SkipAndReturnBytes()
+ return false
+ }
+ iter.Skip()
+ return true
+ })
+ return found
+}
+
+func locateArrayElement(iter *Iterator, target int) []byte {
+ var found []byte
+ n := 0
+ iter.ReadArrayCB(func(iter *Iterator) bool {
+ if n == target {
+ found = iter.SkipAndReturnBytes()
+ return false
+ }
+ iter.Skip()
+ n++
+ return true
+ })
+ return found
+}
+
+func locatePath(iter *Iterator, path []interface{}) Any {
+ for i, pathKeyObj := range path {
+ switch pathKey := pathKeyObj.(type) {
+ case string:
+ valueBytes := locateObjectField(iter, pathKey)
+ if valueBytes == nil {
+ return newInvalidAny(path[i:])
+ }
+ iter.ResetBytes(valueBytes)
+ case int:
+ valueBytes := locateArrayElement(iter, pathKey)
+ if valueBytes == nil {
+ return newInvalidAny(path[i:])
+ }
+ iter.ResetBytes(valueBytes)
+ case int32:
+ if '*' == pathKey {
+ return iter.readAny().Get(path[i:]...)
+ }
+ return newInvalidAny(path[i:])
+ default:
+ return newInvalidAny(path[i:])
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ return &invalidAny{baseAny{}, iter.Error}
+ }
+ return iter.readAny()
+}
+
+var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem()
+
+func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder {
+ if typ == anyType {
+ return &directAnyCodec{}
+ }
+ if typ.Implements(anyType) {
+ return &anyCodec{
+ valType: typ,
+ }
+ }
+ return nil
+}
+
+func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder {
+ if typ == anyType {
+ return &directAnyCodec{}
+ }
+ if typ.Implements(anyType) {
+ return &anyCodec{
+ valType: typ,
+ }
+ }
+ return nil
+}
+
+type anyCodec struct {
+ valType reflect2.Type
+}
+
+func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ panic("not implemented")
+}
+
+func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ obj := codec.valType.UnsafeIndirect(ptr)
+ any := obj.(Any)
+ any.WriteTo(stream)
+}
+
+func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ obj := codec.valType.UnsafeIndirect(ptr)
+ any := obj.(Any)
+ return any.Size() == 0
+}
+
+type directAnyCodec struct {
+}
+
+func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ *(*Any)(ptr) = iter.readAny()
+}
+
+func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ any := *(*Any)(ptr)
+ if any == nil {
+ stream.WriteNil()
+ return
+ }
+ any.WriteTo(stream)
+}
+
+func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ any := *(*Any)(ptr)
+ return any.Size() == 0
+}
diff --git a/vendor/github.com/json-iterator/go/any_array.go b/vendor/github.com/json-iterator/go/any_array.go
new file mode 100644
index 0000000..0449e9a
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_array.go
@@ -0,0 +1,278 @@
+package jsoniter
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+type arrayLazyAny struct {
+ baseAny
+ cfg *frozenConfig
+ buf []byte
+ err error
+}
+
+func (any *arrayLazyAny) ValueType() ValueType {
+ return ArrayValue
+}
+
+func (any *arrayLazyAny) MustBeValid() Any {
+ return any
+}
+
+func (any *arrayLazyAny) LastError() error {
+ return any.err
+}
+
+func (any *arrayLazyAny) ToBool() bool {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ return iter.ReadArray()
+}
+
+func (any *arrayLazyAny) ToInt() int {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToInt32() int32 {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToInt64() int64 {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToUint() uint {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToUint32() uint32 {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToUint64() uint64 {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToFloat32() float32 {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToFloat64() float64 {
+ if any.ToBool() {
+ return 1
+ }
+ return 0
+}
+
+func (any *arrayLazyAny) ToString() string {
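+	// reinterpret the raw []byte buffer as a string without copying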
+ return *(*string)(unsafe.Pointer(&any.buf))
+}
+
+func (any *arrayLazyAny) ToVal(val interface{}) {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ iter.ReadVal(val)
+}
+
+func (any *arrayLazyAny) Get(path ...interface{}) Any {
+ if len(path) == 0 {
+ return any
+ }
+ switch firstPath := path[0].(type) {
+ case int:
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ valueBytes := locateArrayElement(iter, firstPath)
+ if valueBytes == nil {
+ return newInvalidAny(path)
+ }
+ iter.ResetBytes(valueBytes)
+ return locatePath(iter, path[1:])
+ case int32:
+ if '*' == firstPath {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ arr := make([]Any, 0)
+ iter.ReadArrayCB(func(iter *Iterator) bool {
+ found := iter.readAny().Get(path[1:]...)
+ if found.ValueType() != InvalidValue {
+ arr = append(arr, found)
+ }
+ return true
+ })
+ return wrapArray(arr)
+ }
+ return newInvalidAny(path)
+ default:
+ return newInvalidAny(path)
+ }
+}
+
+func (any *arrayLazyAny) Size() int {
+ size := 0
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ iter.ReadArrayCB(func(iter *Iterator) bool {
+ size++
+ iter.Skip()
+ return true
+ })
+ return size
+}
+
+func (any *arrayLazyAny) WriteTo(stream *Stream) {
+ stream.Write(any.buf)
+}
+
+func (any *arrayLazyAny) GetInterface() interface{} {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ return iter.Read()
+}
+
+type arrayAny struct {
+ baseAny
+ val reflect.Value
+}
+
+func wrapArray(val interface{}) *arrayAny {
+ return &arrayAny{baseAny{}, reflect.ValueOf(val)}
+}
+
+func (any *arrayAny) ValueType() ValueType {
+ return ArrayValue
+}
+
+func (any *arrayAny) MustBeValid() Any {
+ return any
+}
+
+func (any *arrayAny) LastError() error {
+ return nil
+}
+
+func (any *arrayAny) ToBool() bool {
+ return any.val.Len() != 0
+}
+
+func (any *arrayAny) ToInt() int {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToInt32() int32 {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToInt64() int64 {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToUint() uint {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToUint32() uint32 {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToUint64() uint64 {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToFloat32() float32 {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToFloat64() float64 {
+ if any.val.Len() == 0 {
+ return 0
+ }
+ return 1
+}
+
+func (any *arrayAny) ToString() string {
+ str, _ := MarshalToString(any.val.Interface())
+ return str
+}
+
+func (any *arrayAny) Get(path ...interface{}) Any {
+ if len(path) == 0 {
+ return any
+ }
+ switch firstPath := path[0].(type) {
+ case int:
+ if firstPath < 0 || firstPath >= any.val.Len() {
+ return newInvalidAny(path)
+ }
+ return Wrap(any.val.Index(firstPath).Interface())
+ case int32:
+ if '*' == firstPath {
+ mappedAll := make([]Any, 0)
+ for i := 0; i < any.val.Len(); i++ {
+ mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...)
+ if mapped.ValueType() != InvalidValue {
+ mappedAll = append(mappedAll, mapped)
+ }
+ }
+ return wrapArray(mappedAll)
+ }
+ return newInvalidAny(path)
+ default:
+ return newInvalidAny(path)
+ }
+}
+
+func (any *arrayAny) Size() int {
+ return any.val.Len()
+}
+
+func (any *arrayAny) WriteTo(stream *Stream) {
+ stream.WriteVal(any.val)
+}
+
+func (any *arrayAny) GetInterface() interface{} {
+ return any.val.Interface()
+}
diff --git a/vendor/github.com/json-iterator/go/any_bool.go b/vendor/github.com/json-iterator/go/any_bool.go
new file mode 100644
index 0000000..9452324
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_bool.go
@@ -0,0 +1,137 @@
+package jsoniter
+
+type trueAny struct {
+ baseAny
+}
+
+func (any *trueAny) LastError() error {
+ return nil
+}
+
+func (any *trueAny) ToBool() bool {
+ return true
+}
+
+func (any *trueAny) ToInt() int {
+ return 1
+}
+
+func (any *trueAny) ToInt32() int32 {
+ return 1
+}
+
+func (any *trueAny) ToInt64() int64 {
+ return 1
+}
+
+func (any *trueAny) ToUint() uint {
+ return 1
+}
+
+func (any *trueAny) ToUint32() uint32 {
+ return 1
+}
+
+func (any *trueAny) ToUint64() uint64 {
+ return 1
+}
+
+func (any *trueAny) ToFloat32() float32 {
+ return 1
+}
+
+func (any *trueAny) ToFloat64() float64 {
+ return 1
+}
+
+func (any *trueAny) ToString() string {
+ return "true"
+}
+
+func (any *trueAny) WriteTo(stream *Stream) {
+ stream.WriteTrue()
+}
+
+func (any *trueAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *trueAny) GetInterface() interface{} {
+ return true
+}
+
+func (any *trueAny) ValueType() ValueType {
+ return BoolValue
+}
+
+func (any *trueAny) MustBeValid() Any {
+ return any
+}
+
+type falseAny struct {
+ baseAny
+}
+
+func (any *falseAny) LastError() error {
+ return nil
+}
+
+func (any *falseAny) ToBool() bool {
+ return false
+}
+
+func (any *falseAny) ToInt() int {
+ return 0
+}
+
+func (any *falseAny) ToInt32() int32 {
+ return 0
+}
+
+func (any *falseAny) ToInt64() int64 {
+ return 0
+}
+
+func (any *falseAny) ToUint() uint {
+ return 0
+}
+
+func (any *falseAny) ToUint32() uint32 {
+ return 0
+}
+
+func (any *falseAny) ToUint64() uint64 {
+ return 0
+}
+
+func (any *falseAny) ToFloat32() float32 {
+ return 0
+}
+
+func (any *falseAny) ToFloat64() float64 {
+ return 0
+}
+
+func (any *falseAny) ToString() string {
+ return "false"
+}
+
+func (any *falseAny) WriteTo(stream *Stream) {
+ stream.WriteFalse()
+}
+
+func (any *falseAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *falseAny) GetInterface() interface{} {
+ return false
+}
+
+func (any *falseAny) ValueType() ValueType {
+ return BoolValue
+}
+
+func (any *falseAny) MustBeValid() Any {
+ return any
+}
diff --git a/vendor/github.com/json-iterator/go/any_float.go b/vendor/github.com/json-iterator/go/any_float.go
new file mode 100644
index 0000000..35fdb09
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_float.go
@@ -0,0 +1,83 @@
+package jsoniter
+
+import (
+ "strconv"
+)
+
+type floatAny struct {
+ baseAny
+ val float64
+}
+
+func (any *floatAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *floatAny) ValueType() ValueType {
+ return NumberValue
+}
+
+func (any *floatAny) MustBeValid() Any {
+ return any
+}
+
+func (any *floatAny) LastError() error {
+ return nil
+}
+
+func (any *floatAny) ToBool() bool {
+ return any.ToFloat64() != 0
+}
+
+func (any *floatAny) ToInt() int {
+ return int(any.val)
+}
+
+func (any *floatAny) ToInt32() int32 {
+ return int32(any.val)
+}
+
+func (any *floatAny) ToInt64() int64 {
+ return int64(any.val)
+}
+
+func (any *floatAny) ToUint() uint {
+ if any.val > 0 {
+ return uint(any.val)
+ }
+ return 0
+}
+
+func (any *floatAny) ToUint32() uint32 {
+ if any.val > 0 {
+ return uint32(any.val)
+ }
+ return 0
+}
+
+func (any *floatAny) ToUint64() uint64 {
+ if any.val > 0 {
+ return uint64(any.val)
+ }
+ return 0
+}
+
+func (any *floatAny) ToFloat32() float32 {
+ return float32(any.val)
+}
+
+func (any *floatAny) ToFloat64() float64 {
+ return any.val
+}
+
+func (any *floatAny) ToString() string {
+ return strconv.FormatFloat(any.val, 'E', -1, 64)
+}
+
+func (any *floatAny) WriteTo(stream *Stream) {
+ stream.WriteFloat64(any.val)
+}
+
+func (any *floatAny) GetInterface() interface{} {
+ return any.val
+}
diff --git a/vendor/github.com/json-iterator/go/any_int32.go b/vendor/github.com/json-iterator/go/any_int32.go
new file mode 100644
index 0000000..1b56f39
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_int32.go
@@ -0,0 +1,74 @@
+package jsoniter
+
+import (
+ "strconv"
+)
+
+type int32Any struct {
+ baseAny
+ val int32
+}
+
+func (any *int32Any) LastError() error {
+ return nil
+}
+
+func (any *int32Any) ValueType() ValueType {
+ return NumberValue
+}
+
+func (any *int32Any) MustBeValid() Any {
+ return any
+}
+
+func (any *int32Any) ToBool() bool {
+ return any.val != 0
+}
+
+func (any *int32Any) ToInt() int {
+ return int(any.val)
+}
+
+func (any *int32Any) ToInt32() int32 {
+ return any.val
+}
+
+func (any *int32Any) ToInt64() int64 {
+ return int64(any.val)
+}
+
+func (any *int32Any) ToUint() uint {
+ return uint(any.val)
+}
+
+func (any *int32Any) ToUint32() uint32 {
+ return uint32(any.val)
+}
+
+func (any *int32Any) ToUint64() uint64 {
+ return uint64(any.val)
+}
+
+func (any *int32Any) ToFloat32() float32 {
+ return float32(any.val)
+}
+
+func (any *int32Any) ToFloat64() float64 {
+ return float64(any.val)
+}
+
+func (any *int32Any) ToString() string {
+ return strconv.FormatInt(int64(any.val), 10)
+}
+
+func (any *int32Any) WriteTo(stream *Stream) {
+ stream.WriteInt32(any.val)
+}
+
+func (any *int32Any) Parse() *Iterator {
+ return nil
+}
+
+func (any *int32Any) GetInterface() interface{} {
+ return any.val
+}
diff --git a/vendor/github.com/json-iterator/go/any_int64.go b/vendor/github.com/json-iterator/go/any_int64.go
new file mode 100644
index 0000000..c440d72
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_int64.go
@@ -0,0 +1,74 @@
+package jsoniter
+
+import (
+ "strconv"
+)
+
+type int64Any struct {
+ baseAny
+ val int64
+}
+
+func (any *int64Any) LastError() error {
+ return nil
+}
+
+func (any *int64Any) ValueType() ValueType {
+ return NumberValue
+}
+
+func (any *int64Any) MustBeValid() Any {
+ return any
+}
+
+func (any *int64Any) ToBool() bool {
+ return any.val != 0
+}
+
+func (any *int64Any) ToInt() int {
+ return int(any.val)
+}
+
+func (any *int64Any) ToInt32() int32 {
+ return int32(any.val)
+}
+
+func (any *int64Any) ToInt64() int64 {
+ return any.val
+}
+
+func (any *int64Any) ToUint() uint {
+ return uint(any.val)
+}
+
+func (any *int64Any) ToUint32() uint32 {
+ return uint32(any.val)
+}
+
+func (any *int64Any) ToUint64() uint64 {
+ return uint64(any.val)
+}
+
+func (any *int64Any) ToFloat32() float32 {
+ return float32(any.val)
+}
+
+func (any *int64Any) ToFloat64() float64 {
+ return float64(any.val)
+}
+
+func (any *int64Any) ToString() string {
+ return strconv.FormatInt(any.val, 10)
+}
+
+func (any *int64Any) WriteTo(stream *Stream) {
+ stream.WriteInt64(any.val)
+}
+
+func (any *int64Any) Parse() *Iterator {
+ return nil
+}
+
+func (any *int64Any) GetInterface() interface{} {
+ return any.val
+}
diff --git a/vendor/github.com/json-iterator/go/any_invalid.go b/vendor/github.com/json-iterator/go/any_invalid.go
new file mode 100644
index 0000000..1d859ea
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_invalid.go
@@ -0,0 +1,82 @@
+package jsoniter
+
+import "fmt"
+
+type invalidAny struct {
+ baseAny
+ err error
+}
+
+func newInvalidAny(path []interface{}) *invalidAny {
+ return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)}
+}
+
+func (any *invalidAny) LastError() error {
+ return any.err
+}
+
+func (any *invalidAny) ValueType() ValueType {
+ return InvalidValue
+}
+
+func (any *invalidAny) MustBeValid() Any {
+ panic(any.err)
+}
+
+func (any *invalidAny) ToBool() bool {
+ return false
+}
+
+func (any *invalidAny) ToInt() int {
+ return 0
+}
+
+func (any *invalidAny) ToInt32() int32 {
+ return 0
+}
+
+func (any *invalidAny) ToInt64() int64 {
+ return 0
+}
+
+func (any *invalidAny) ToUint() uint {
+ return 0
+}
+
+func (any *invalidAny) ToUint32() uint32 {
+ return 0
+}
+
+func (any *invalidAny) ToUint64() uint64 {
+ return 0
+}
+
+func (any *invalidAny) ToFloat32() float32 {
+ return 0
+}
+
+func (any *invalidAny) ToFloat64() float64 {
+ return 0
+}
+
+func (any *invalidAny) ToString() string {
+ return ""
+}
+
+func (any *invalidAny) WriteTo(stream *Stream) {
+}
+
+func (any *invalidAny) Get(path ...interface{}) Any {
+ if any.err == nil {
+ return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)}
+ }
+ return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)}
+}
+
+func (any *invalidAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *invalidAny) GetInterface() interface{} {
+ return nil
+}
diff --git a/vendor/github.com/json-iterator/go/any_nil.go b/vendor/github.com/json-iterator/go/any_nil.go
new file mode 100644
index 0000000..d04cb54
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_nil.go
@@ -0,0 +1,69 @@
+package jsoniter
+
+type nilAny struct {
+ baseAny
+}
+
+func (any *nilAny) LastError() error {
+ return nil
+}
+
+func (any *nilAny) ValueType() ValueType {
+ return NilValue
+}
+
+func (any *nilAny) MustBeValid() Any {
+ return any
+}
+
+func (any *nilAny) ToBool() bool {
+ return false
+}
+
+func (any *nilAny) ToInt() int {
+ return 0
+}
+
+func (any *nilAny) ToInt32() int32 {
+ return 0
+}
+
+func (any *nilAny) ToInt64() int64 {
+ return 0
+}
+
+func (any *nilAny) ToUint() uint {
+ return 0
+}
+
+func (any *nilAny) ToUint32() uint32 {
+ return 0
+}
+
+func (any *nilAny) ToUint64() uint64 {
+ return 0
+}
+
+func (any *nilAny) ToFloat32() float32 {
+ return 0
+}
+
+func (any *nilAny) ToFloat64() float64 {
+ return 0
+}
+
+func (any *nilAny) ToString() string {
+ return ""
+}
+
+func (any *nilAny) WriteTo(stream *Stream) {
+ stream.WriteNil()
+}
+
+func (any *nilAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *nilAny) GetInterface() interface{} {
+ return nil
+}
diff --git a/vendor/github.com/json-iterator/go/any_number.go b/vendor/github.com/json-iterator/go/any_number.go
new file mode 100644
index 0000000..9d1e901
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_number.go
@@ -0,0 +1,123 @@
+package jsoniter
+
+import (
+ "io"
+ "unsafe"
+)
+
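+// numberLazyAny defers parsing: it keeps the raw number bytes and only
+// converts them when an accessor is called, borrowing a pooled iterator
+// from its config for each conversion.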
+type numberLazyAny struct {
+ baseAny
+ cfg *frozenConfig
+ buf []byte
+ err error
+}
+
+func (any *numberLazyAny) ValueType() ValueType {
+ return NumberValue
+}
+
+func (any *numberLazyAny) MustBeValid() Any {
+ return any
+}
+
+func (any *numberLazyAny) LastError() error {
+ return any.err
+}
+
+func (any *numberLazyAny) ToBool() bool {
+ return any.ToFloat64() != 0
+}
+
+func (any *numberLazyAny) ToInt() int {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadInt()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToInt32() int32 {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadInt32()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToInt64() int64 {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadInt64()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToUint() uint {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadUint()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToUint32() uint32 {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadUint32()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToUint64() uint64 {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadUint64()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToFloat32() float32 {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadFloat32()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToFloat64() float64 {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ val := iter.ReadFloat64()
+ if iter.Error != nil && iter.Error != io.EOF {
+ any.err = iter.Error
+ }
+ return val
+}
+
+func (any *numberLazyAny) ToString() string {
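+ // zero-copy: reinterpret the underlying []byte as a string without copying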
+ return *(*string)(unsafe.Pointer(&any.buf))
+}
+
+func (any *numberLazyAny) WriteTo(stream *Stream) {
+ stream.Write(any.buf)
+}
+
+func (any *numberLazyAny) GetInterface() interface{} {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ return iter.Read()
+}
diff --git a/vendor/github.com/json-iterator/go/any_object.go b/vendor/github.com/json-iterator/go/any_object.go
new file mode 100644
index 0000000..c44ef5c
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_object.go
@@ -0,0 +1,374 @@
+package jsoniter
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+type objectLazyAny struct {
+ baseAny
+ cfg *frozenConfig
+ buf []byte
+ err error
+}
+
+func (any *objectLazyAny) ValueType() ValueType {
+ return ObjectValue
+}
+
+func (any *objectLazyAny) MustBeValid() Any {
+ return any
+}
+
+func (any *objectLazyAny) LastError() error {
+ return any.err
+}
+
+func (any *objectLazyAny) ToBool() bool {
+ return true
+}
+
+func (any *objectLazyAny) ToInt() int {
+ return 0
+}
+
+func (any *objectLazyAny) ToInt32() int32 {
+ return 0
+}
+
+func (any *objectLazyAny) ToInt64() int64 {
+ return 0
+}
+
+func (any *objectLazyAny) ToUint() uint {
+ return 0
+}
+
+func (any *objectLazyAny) ToUint32() uint32 {
+ return 0
+}
+
+func (any *objectLazyAny) ToUint64() uint64 {
+ return 0
+}
+
+func (any *objectLazyAny) ToFloat32() float32 {
+ return 0
+}
+
+func (any *objectLazyAny) ToFloat64() float64 {
+ return 0
+}
+
+func (any *objectLazyAny) ToString() string {
+ return *(*string)(unsafe.Pointer(&any.buf))
+}
+
+func (any *objectLazyAny) ToVal(obj interface{}) {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ iter.ReadVal(obj)
+}
+
+func (any *objectLazyAny) Get(path ...interface{}) Any {
+ if len(path) == 0 {
+ return any
+ }
+ switch firstPath := path[0].(type) {
+ case string:
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ valueBytes := locateObjectField(iter, firstPath)
+ if valueBytes == nil {
+ return newInvalidAny(path)
+ }
+ iter.ResetBytes(valueBytes)
+ return locatePath(iter, path[1:])
+ case int32:
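+ // a '*' rune acts as a wildcard: the rest of the path is applied to every field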
+ if firstPath == '*' {
+ mappedAll := map[string]Any{}
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ iter.ReadMapCB(func(iter *Iterator, field string) bool {
+ mapped := locatePath(iter, path[1:])
+ if mapped.ValueType() != InvalidValue {
+ mappedAll[field] = mapped
+ }
+ return true
+ })
+ return wrapMap(mappedAll)
+ }
+ return newInvalidAny(path)
+ default:
+ return newInvalidAny(path)
+ }
+}
+
+func (any *objectLazyAny) Keys() []string {
+ keys := []string{}
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ iter.ReadMapCB(func(iter *Iterator, field string) bool {
+ iter.Skip()
+ keys = append(keys, field)
+ return true
+ })
+ return keys
+}
+
+func (any *objectLazyAny) Size() int {
+ size := 0
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ iter.ReadObjectCB(func(iter *Iterator, field string) bool {
+ iter.Skip()
+ size++
+ return true
+ })
+ return size
+}
+
+func (any *objectLazyAny) WriteTo(stream *Stream) {
+ stream.Write(any.buf)
+}
+
+func (any *objectLazyAny) GetInterface() interface{} {
+ iter := any.cfg.BorrowIterator(any.buf)
+ defer any.cfg.ReturnIterator(iter)
+ return iter.Read()
+}
+
+type objectAny struct {
+ baseAny
+ err error
+ val reflect.Value
+}
+
+func wrapStruct(val interface{}) *objectAny {
+ return &objectAny{baseAny{}, nil, reflect.ValueOf(val)}
+}
+
+func (any *objectAny) ValueType() ValueType {
+ return ObjectValue
+}
+
+func (any *objectAny) MustBeValid() Any {
+ return any
+}
+
+func (any *objectAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *objectAny) LastError() error {
+ return any.err
+}
+
+func (any *objectAny) ToBool() bool {
+ return any.val.NumField() != 0
+}
+
+func (any *objectAny) ToInt() int {
+ return 0
+}
+
+func (any *objectAny) ToInt32() int32 {
+ return 0
+}
+
+func (any *objectAny) ToInt64() int64 {
+ return 0
+}
+
+func (any *objectAny) ToUint() uint {
+ return 0
+}
+
+func (any *objectAny) ToUint32() uint32 {
+ return 0
+}
+
+func (any *objectAny) ToUint64() uint64 {
+ return 0
+}
+
+func (any *objectAny) ToFloat32() float32 {
+ return 0
+}
+
+func (any *objectAny) ToFloat64() float64 {
+ return 0
+}
+
+func (any *objectAny) ToString() string {
+ str, err := MarshalToString(any.val.Interface())
+ any.err = err
+ return str
+}
+
+func (any *objectAny) Get(path ...interface{}) Any {
+ if len(path) == 0 {
+ return any
+ }
+ switch firstPath := path[0].(type) {
+ case string:
+ field := any.val.FieldByName(firstPath)
+ if !field.IsValid() {
+ return newInvalidAny(path)
+ }
+ return Wrap(field.Interface())
+ case int32:
+ if firstPath == '*' {
+ mappedAll := map[string]Any{}
+ for i := 0; i < any.val.NumField(); i++ {
+ field := any.val.Field(i)
+ if field.CanInterface() {
+ mapped := Wrap(field.Interface()).Get(path[1:]...)
+ if mapped.ValueType() != InvalidValue {
+ mappedAll[any.val.Type().Field(i).Name] = mapped
+ }
+ }
+ }
+ return wrapMap(mappedAll)
+ }
+ return newInvalidAny(path)
+ default:
+ return newInvalidAny(path)
+ }
+}
+
+func (any *objectAny) Keys() []string {
+ keys := make([]string, 0, any.val.NumField())
+ for i := 0; i < any.val.NumField(); i++ {
+ keys = append(keys, any.val.Type().Field(i).Name)
+ }
+ return keys
+}
+
+func (any *objectAny) Size() int {
+ return any.val.NumField()
+}
+
+func (any *objectAny) WriteTo(stream *Stream) {
+ stream.WriteVal(any.val)
+}
+
+func (any *objectAny) GetInterface() interface{} {
+ return any.val.Interface()
+}
+
+type mapAny struct {
+ baseAny
+ err error
+ val reflect.Value
+}
+
+func wrapMap(val interface{}) *mapAny {
+ return &mapAny{baseAny{}, nil, reflect.ValueOf(val)}
+}
+
+func (any *mapAny) ValueType() ValueType {
+ return ObjectValue
+}
+
+func (any *mapAny) MustBeValid() Any {
+ return any
+}
+
+func (any *mapAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *mapAny) LastError() error {
+ return any.err
+}
+
+func (any *mapAny) ToBool() bool {
+ return true
+}
+
+func (any *mapAny) ToInt() int {
+ return 0
+}
+
+func (any *mapAny) ToInt32() int32 {
+ return 0
+}
+
+func (any *mapAny) ToInt64() int64 {
+ return 0
+}
+
+func (any *mapAny) ToUint() uint {
+ return 0
+}
+
+func (any *mapAny) ToUint32() uint32 {
+ return 0
+}
+
+func (any *mapAny) ToUint64() uint64 {
+ return 0
+}
+
+func (any *mapAny) ToFloat32() float32 {
+ return 0
+}
+
+func (any *mapAny) ToFloat64() float64 {
+ return 0
+}
+
+func (any *mapAny) ToString() string {
+ str, err := MarshalToString(any.val.Interface())
+ any.err = err
+ return str
+}
+
+func (any *mapAny) Get(path ...interface{}) Any {
+ if len(path) == 0 {
+ return any
+ }
+ switch firstPath := path[0].(type) {
+ case int32:
+ if firstPath == '*' {
+ mappedAll := map[string]Any{}
+ for _, key := range any.val.MapKeys() {
+ keyAsStr := key.String()
+ element := Wrap(any.val.MapIndex(key).Interface())
+ mapped := element.Get(path[1:]...)
+ if mapped.ValueType() != InvalidValue {
+ mappedAll[keyAsStr] = mapped
+ }
+ }
+ return wrapMap(mappedAll)
+ }
+ return newInvalidAny(path)
+ default:
+ value := any.val.MapIndex(reflect.ValueOf(firstPath))
+ if !value.IsValid() {
+ return newInvalidAny(path)
+ }
+ return Wrap(value.Interface())
+ }
+}
+
+func (any *mapAny) Keys() []string {
+ keys := make([]string, 0, any.val.Len())
+ for _, key := range any.val.MapKeys() {
+ keys = append(keys, key.String())
+ }
+ return keys
+}
+
+func (any *mapAny) Size() int {
+ return any.val.Len()
+}
+
+func (any *mapAny) WriteTo(stream *Stream) {
+ stream.WriteVal(any.val)
+}
+
+func (any *mapAny) GetInterface() interface{} {
+ return any.val.Interface()
+}
diff --git a/vendor/github.com/json-iterator/go/any_str.go b/vendor/github.com/json-iterator/go/any_str.go
new file mode 100644
index 0000000..a4b93c7
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_str.go
@@ -0,0 +1,166 @@
+package jsoniter
+
+import (
+ "fmt"
+ "strconv"
+)
+
+type stringAny struct {
+ baseAny
+ val string
+}
+
+func (any *stringAny) Get(path ...interface{}) Any {
+ if len(path) == 0 {
+ return any
+ }
+ return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)}
+}
+
+func (any *stringAny) Parse() *Iterator {
+ return nil
+}
+
+func (any *stringAny) ValueType() ValueType {
+ return StringValue
+}
+
+func (any *stringAny) MustBeValid() Any {
+ return any
+}
+
+func (any *stringAny) LastError() error {
+ return nil
+}
+
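+// ToBool applies fuzzy conversion: "0" and whitespace-only strings are
+// false, any other non-empty string is true.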
+func (any *stringAny) ToBool() bool {
+ str := any.ToString()
+ if str == "0" {
+ return false
+ }
+ for _, c := range str {
+ switch c {
+ case ' ', '\n', '\r', '\t':
+ default:
+ return true
+ }
+ }
+ return false
+}
+
+func (any *stringAny) ToInt() int {
+ return int(any.ToInt64())
+}
+
+func (any *stringAny) ToInt32() int32 {
+ return int32(any.ToInt64())
+}
+
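+// ToInt64 parses the longest leading integer prefix, so "123abc"
+// converts to 123 and a string with no leading digits converts to 0.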
+func (any *stringAny) ToInt64() int64 {
+ if any.val == "" {
+ return 0
+ }
+
+ flag := 1
+ startPos := 0
+ endPos := 0
+ if any.val[0] == '+' || any.val[0] == '-' {
+ startPos = 1
+ }
+
+ if any.val[0] == '-' {
+ flag = -1
+ }
+
+ for i := startPos; i < len(any.val); i++ {
+ if any.val[i] >= '0' && any.val[i] <= '9' {
+ endPos = i + 1
+ } else {
+ break
+ }
+ }
+ parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64)
+ return int64(flag) * parsed
+}
+
+func (any *stringAny) ToUint() uint {
+ return uint(any.ToUint64())
+}
+
+func (any *stringAny) ToUint32() uint32 {
+ return uint32(any.ToUint64())
+}
+
+func (any *stringAny) ToUint64() uint64 {
+ if any.val == "" {
+ return 0
+ }
+
+ startPos := 0
+ endPos := 0
+
+ if any.val[0] == '-' {
+ return 0
+ }
+ if any.val[0] == '+' {
+ startPos = 1
+ }
+
+ for i := startPos; i < len(any.val); i++ {
+ if any.val[i] >= '0' && any.val[i] <= '9' {
+ endPos = i + 1
+ } else {
+ break
+ }
+ }
+ parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64)
+ return parsed
+}
+
+func (any *stringAny) ToFloat32() float32 {
+ return float32(any.ToFloat64())
+}
+
+func (any *stringAny) ToFloat64() float64 {
+ if len(any.val) == 0 {
+ return 0
+ }
+
+ // first char invalid
+ if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') {
+ return 0
+ }
+
+ // extract the valid number expression from the string,
+ // e.g. 123true => 123, -12.12xxa => -12.12
+ endPos := 1
+ for i := 1; i < len(any.val); i++ {
+ if any.val[i] == '.' || any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' {
+ endPos = i + 1
+ continue
+ }
+
+ // the end position is the first char that is not a digit
+ if any.val[i] >= '0' && any.val[i] <= '9' {
+ endPos = i + 1
+ } else {
+ endPos = i
+ break
+ }
+ }
+ parsed, _ := strconv.ParseFloat(any.val[:endPos], 64)
+ return parsed
+}
+
+func (any *stringAny) ToString() string {
+ return any.val
+}
+
+func (any *stringAny) WriteTo(stream *Stream) {
+ stream.WriteString(any.val)
+}
+
+func (any *stringAny) GetInterface() interface{} {
+ return any.val
+}
diff --git a/vendor/github.com/json-iterator/go/any_uint32.go b/vendor/github.com/json-iterator/go/any_uint32.go
new file mode 100644
index 0000000..656bbd3
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_uint32.go
@@ -0,0 +1,74 @@
+package jsoniter
+
+import (
+ "strconv"
+)
+
+type uint32Any struct {
+ baseAny
+ val uint32
+}
+
+func (any *uint32Any) LastError() error {
+ return nil
+}
+
+func (any *uint32Any) ValueType() ValueType {
+ return NumberValue
+}
+
+func (any *uint32Any) MustBeValid() Any {
+ return any
+}
+
+func (any *uint32Any) ToBool() bool {
+ return any.val != 0
+}
+
+func (any *uint32Any) ToInt() int {
+ return int(any.val)
+}
+
+func (any *uint32Any) ToInt32() int32 {
+ return int32(any.val)
+}
+
+func (any *uint32Any) ToInt64() int64 {
+ return int64(any.val)
+}
+
+func (any *uint32Any) ToUint() uint {
+ return uint(any.val)
+}
+
+func (any *uint32Any) ToUint32() uint32 {
+ return any.val
+}
+
+func (any *uint32Any) ToUint64() uint64 {
+ return uint64(any.val)
+}
+
+func (any *uint32Any) ToFloat32() float32 {
+ return float32(any.val)
+}
+
+func (any *uint32Any) ToFloat64() float64 {
+ return float64(any.val)
+}
+
+func (any *uint32Any) ToString() string {
+ return strconv.FormatInt(int64(any.val), 10)
+}
+
+func (any *uint32Any) WriteTo(stream *Stream) {
+ stream.WriteUint32(any.val)
+}
+
+func (any *uint32Any) Parse() *Iterator {
+ return nil
+}
+
+func (any *uint32Any) GetInterface() interface{} {
+ return any.val
+}
diff --git a/vendor/github.com/json-iterator/go/any_uint64.go b/vendor/github.com/json-iterator/go/any_uint64.go
new file mode 100644
index 0000000..7df2fce
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_uint64.go
@@ -0,0 +1,74 @@
+package jsoniter
+
+import (
+ "strconv"
+)
+
+type uint64Any struct {
+ baseAny
+ val uint64
+}
+
+func (any *uint64Any) LastError() error {
+ return nil
+}
+
+func (any *uint64Any) ValueType() ValueType {
+ return NumberValue
+}
+
+func (any *uint64Any) MustBeValid() Any {
+ return any
+}
+
+func (any *uint64Any) ToBool() bool {
+ return any.val != 0
+}
+
+func (any *uint64Any) ToInt() int {
+ return int(any.val)
+}
+
+func (any *uint64Any) ToInt32() int32 {
+ return int32(any.val)
+}
+
+func (any *uint64Any) ToInt64() int64 {
+ return int64(any.val)
+}
+
+func (any *uint64Any) ToUint() uint {
+ return uint(any.val)
+}
+
+func (any *uint64Any) ToUint32() uint32 {
+ return uint32(any.val)
+}
+
+func (any *uint64Any) ToUint64() uint64 {
+ return any.val
+}
+
+func (any *uint64Any) ToFloat32() float32 {
+ return float32(any.val)
+}
+
+func (any *uint64Any) ToFloat64() float64 {
+ return float64(any.val)
+}
+
+func (any *uint64Any) ToString() string {
+ return strconv.FormatUint(any.val, 10)
+}
+
+func (any *uint64Any) WriteTo(stream *Stream) {
+ stream.WriteUint64(any.val)
+}
+
+func (any *uint64Any) Parse() *Iterator {
+ return nil
+}
+
+func (any *uint64Any) GetInterface() interface{} {
+ return any.val
+}
diff --git a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh
new file mode 100644
index 0000000..b45ef68
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/build.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+set -e
+set -x
+
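+# symlink the working copy into a throwaway GOPATH so that the import
+# path github.com/json-iterator/go resolves for the dep run below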
+if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then
+ mkdir -p /tmp/build-golang/src/github.com/json-iterator
+ ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go
+fi
+export GOPATH=/tmp/build-golang
+go get -u github.com/golang/dep/cmd/dep
+cd /tmp/build-golang/src/github.com/json-iterator/go
+exec $GOPATH/bin/dep ensure -update
diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go
new file mode 100644
index 0000000..8c58fcb
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/config.go
@@ -0,0 +1,375 @@
+package jsoniter
+
+import (
+ "encoding/json"
+ "io"
+ "reflect"
+ "sync"
+ "unsafe"
+
+ "github.com/modern-go/concurrent"
+ "github.com/modern-go/reflect2"
+)
+
+// Config customizes how the API behaves.
+// An API is created from a Config by calling Froze.
+type Config struct {
+ IndentionStep int
+ MarshalFloatWith6Digits bool
+ EscapeHTML bool
+ SortMapKeys bool
+ UseNumber bool
+ DisallowUnknownFields bool
+ TagKey string
+ OnlyTaggedField bool
+ ValidateJsonRawMessage bool
+ ObjectFieldMustBeSimpleString bool
+ CaseSensitive bool
+}
+
+// API is the public interface of this package.
+// Its primary functions are Marshal and Unmarshal.
+type API interface {
+ IteratorPool
+ StreamPool
+ MarshalToString(v interface{}) (string, error)
+ Marshal(v interface{}) ([]byte, error)
+ MarshalIndent(v interface{}, prefix, indent string) ([]byte, error)
+ UnmarshalFromString(str string, v interface{}) error
+ Unmarshal(data []byte, v interface{}) error
+ Get(data []byte, path ...interface{}) Any
+ NewEncoder(writer io.Writer) *Encoder
+ NewDecoder(reader io.Reader) *Decoder
+ Valid(data []byte) bool
+ RegisterExtension(extension Extension)
+ DecoderOf(typ reflect2.Type) ValDecoder
+ EncoderOf(typ reflect2.Type) ValEncoder
+}
+
+// ConfigDefault is the default API
+var ConfigDefault = Config{
+ EscapeHTML: true,
+}.Froze()
+
+// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior
+var ConfigCompatibleWithStandardLibrary = Config{
+ EscapeHTML: true,
+ SortMapKeys: true,
+ ValidateJsonRawMessage: true,
+}.Froze()
+
+// ConfigFastest marshals floats with only 6 digits of precision
+var ConfigFastest = Config{
+ EscapeHTML: false,
+ MarshalFloatWith6Digits: true, // will lose precision
+ ObjectFieldMustBeSimpleString: true, // do not unescape object field
+}.Froze()
+
+type frozenConfig struct {
+ configBeforeFrozen Config
+ sortMapKeys bool
+ indentionStep int
+ objectFieldMustBeSimpleString bool
+ onlyTaggedField bool
+ disallowUnknownFields bool
+ decoderCache *concurrent.Map
+ encoderCache *concurrent.Map
+ encoderExtension Extension
+ decoderExtension Extension
+ extraExtensions []Extension
+ streamPool *sync.Pool
+ iteratorPool *sync.Pool
+ caseSensitive bool
+}
+
+func (cfg *frozenConfig) initCache() {
+ cfg.decoderCache = concurrent.NewMap()
+ cfg.encoderCache = concurrent.NewMap()
+}
+
+func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) {
+ cfg.decoderCache.Store(cacheKey, decoder)
+}
+
+func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) {
+ cfg.encoderCache.Store(cacheKey, encoder)
+}
+
+func (cfg *frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder {
+ decoder, found := cfg.decoderCache.Load(cacheKey)
+ if found {
+ return decoder.(ValDecoder)
+ }
+ return nil
+}
+
+func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder {
+ encoder, found := cfg.encoderCache.Load(cacheKey)
+ if found {
+ return encoder.(ValEncoder)
+ }
+ return nil
+}
+
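+// cfgCache maps a Config value to its frozen form, letting identical
+// configurations reuse already-built encoder/decoder caches.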
+var cfgCache = concurrent.NewMap()
+
+func getFrozenConfigFromCache(cfg Config) *frozenConfig {
+ obj, found := cfgCache.Load(cfg)
+ if found {
+ return obj.(*frozenConfig)
+ }
+ return nil
+}
+
+func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) {
+ cfgCache.Store(cfg, frozenConfig)
+}
+
+// Froze forges an API from the Config.
+func (cfg Config) Froze() API {
+ api := &frozenConfig{
+ sortMapKeys: cfg.SortMapKeys,
+ indentionStep: cfg.IndentionStep,
+ objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString,
+ onlyTaggedField: cfg.OnlyTaggedField,
+ disallowUnknownFields: cfg.DisallowUnknownFields,
+ caseSensitive: cfg.CaseSensitive,
+ }
+ api.streamPool = &sync.Pool{
+ New: func() interface{} {
+ return NewStream(api, nil, 512)
+ },
+ }
+ api.iteratorPool = &sync.Pool{
+ New: func() interface{} {
+ return NewIterator(api)
+ },
+ }
+ api.initCache()
+ encoderExtension := EncoderExtension{}
+ decoderExtension := DecoderExtension{}
+ if cfg.MarshalFloatWith6Digits {
+ api.marshalFloatWith6Digits(encoderExtension)
+ }
+ if cfg.EscapeHTML {
+ api.escapeHTML(encoderExtension)
+ }
+ if cfg.UseNumber {
+ api.useNumber(decoderExtension)
+ }
+ if cfg.ValidateJsonRawMessage {
+ api.validateJsonRawMessage(encoderExtension)
+ }
+ api.encoderExtension = encoderExtension
+ api.decoderExtension = decoderExtension
+ api.configBeforeFrozen = cfg
+ return api
+}
+
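+// frozeWithCacheReuse behaves like Froze but consults cfgCache first,
+// so repeated freezes of an identical Config share one frozenConfig.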
+func (cfg Config) frozeWithCacheReuse(extraExtensions []Extension) *frozenConfig {
+ api := getFrozenConfigFromCache(cfg)
+ if api != nil {
+ return api
+ }
+ api = cfg.Froze().(*frozenConfig)
+ for _, extension := range extraExtensions {
+ api.RegisterExtension(extension)
+ }
+ addFrozenConfigToCache(cfg, api)
+ return api
+}
+
+func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) {
+ encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) {
+ rawMessage := *(*json.RawMessage)(ptr)
+ iter := cfg.BorrowIterator([]byte(rawMessage))
+ defer cfg.ReturnIterator(iter) // return the iterator to the pool on every path, not just on success
+ iter.Read()
+ if iter.Error != nil && iter.Error != io.EOF {
+ stream.WriteRaw("null")
+ } else {
+ stream.WriteRaw(string(rawMessage))
+ }
+ }, func(ptr unsafe.Pointer) bool {
+ return len(*((*json.RawMessage)(ptr))) == 0
+ }}
+ extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder
+ extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder
+}
+
+func (cfg *frozenConfig) useNumber(extension DecoderExtension) {
+ extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) {
+ existingValue := *((*interface{})(ptr))
+ if existingValue != nil && reflect.TypeOf(existingValue).Kind() == reflect.Ptr {
+ iter.ReadVal(existingValue)
+ return
+ }
+ if iter.WhatIsNext() == NumberValue {
+ *((*interface{})(ptr)) = json.Number(iter.readNumberAsString())
+ } else {
+ *((*interface{})(ptr)) = iter.Read()
+ }
+ }}
+}
+func (cfg *frozenConfig) getTagKey() string {
+ tagKey := cfg.configBeforeFrozen.TagKey
+ if tagKey == "" {
+ return "json"
+ }
+ return tagKey
+}
+
+func (cfg *frozenConfig) RegisterExtension(extension Extension) {
+ cfg.extraExtensions = append(cfg.extraExtensions, extension)
+ copied := cfg.configBeforeFrozen
+ cfg.configBeforeFrozen = copied
+}
+
+type lossyFloat32Encoder struct {
+}
+
+func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteFloat32Lossy(*((*float32)(ptr)))
+}
+
+func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*float32)(ptr)) == 0
+}
+
+type lossyFloat64Encoder struct {
+}
+
+func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteFloat64Lossy(*((*float64)(ptr)))
+}
+
+func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*float64)(ptr)) == 0
+}
+
+// marshalFloatWith6Digits keeps 10**(-6) precision
+// for float variables for better performance.
+func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) {
+ // for better performance
+ extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{}
+ extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{}
+}
+
+type htmlEscapedStringEncoder struct {
+}
+
+func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ str := *((*string)(ptr))
+ stream.WriteStringWithHTMLEscaped(str)
+}
+
+func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*string)(ptr)) == ""
+}
+
+func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) {
+ encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{}
+}
+
+func (cfg *frozenConfig) cleanDecoders() {
+ typeDecoders = map[string]ValDecoder{}
+ fieldDecoders = map[string]ValDecoder{}
+ *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig))
+}
+
+func (cfg *frozenConfig) cleanEncoders() {
+ typeEncoders = map[string]ValEncoder{}
+ fieldEncoders = map[string]ValEncoder{}
+ *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig))
+}
+
+func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) {
+ stream := cfg.BorrowStream(nil)
+ defer cfg.ReturnStream(stream)
+ stream.WriteVal(v)
+ if stream.Error != nil {
+ return "", stream.Error
+ }
+ return string(stream.Buffer()), nil
+}
+
+func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) {
+ stream := cfg.BorrowStream(nil)
+ defer cfg.ReturnStream(stream)
+ stream.WriteVal(v)
+ if stream.Error != nil {
+ return nil, stream.Error
+ }
+ result := stream.Buffer()
+ copied := make([]byte, len(result))
+ copy(copied, result)
+ return copied, nil
+}
+
+func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+ if prefix != "" {
+ panic("prefix is not supported")
+ }
+ for _, r := range indent {
+ if r != ' ' {
+ panic("indent can only be space")
+ }
+ }
+ newCfg := cfg.configBeforeFrozen
+ newCfg.IndentionStep = len(indent)
+ return newCfg.frozeWithCacheReuse(cfg.extraExtensions).Marshal(v)
+}
+
+func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error {
+ data := []byte(str)
+ iter := cfg.BorrowIterator(data)
+ defer cfg.ReturnIterator(iter)
+ iter.ReadVal(v)
+ c := iter.nextToken()
+ if c == 0 {
+ if iter.Error == io.EOF {
+ return nil
+ }
+ return iter.Error
+ }
+ iter.ReportError("Unmarshal", "there are bytes left after unmarshal")
+ return iter.Error
+}
+
+func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any {
+ iter := cfg.BorrowIterator(data)
+ defer cfg.ReturnIterator(iter)
+ return locatePath(iter, path)
+}
+
+func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error {
+ iter := cfg.BorrowIterator(data)
+ defer cfg.ReturnIterator(iter)
+ iter.ReadVal(v)
+ c := iter.nextToken()
+ if c == 0 {
+ if iter.Error == io.EOF {
+ return nil
+ }
+ return iter.Error
+ }
+ iter.ReportError("Unmarshal", "there are bytes left after unmarshal")
+ return iter.Error
+}
+
+func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder {
+ stream := NewStream(cfg, writer, 512)
+ return &Encoder{stream}
+}
+
+func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder {
+ iter := Parse(cfg, reader, 512)
+ return &Decoder{iter}
+}
+
+func (cfg *frozenConfig) Valid(data []byte) bool {
+ iter := cfg.BorrowIterator(data)
+ defer cfg.ReturnIterator(iter)
+ iter.Skip()
+ return iter.Error == nil
+}
diff --git a/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
new file mode 100644
index 0000000..3095662
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
@@ -0,0 +1,7 @@
+| json type \ dest type | bool | int | uint | float |string|
+| --- | --- | --- | --- |--|--|
+| number | positive => true <br/> negative => true <br/> zero => false | 23.2 => 23 <br/> -32.1 => -32 | 12.1 => 12 <br/> -12.1 => 0 | as normal | same as original |
+| string | empty string => false <br/> string "0" => false <br/> other strings => true | "123.32" => 123 <br/> "-123.4" => -123 <br/> "123.23xxxw" => 123 <br/> "abcde12" => 0 <br/> "-32.1" => -32 | 13.2 => 13 <br/> -1.1 => 0 | 12.1 => 12.1 <br/> -12.3 => -12.3 <br/> 12.4xxa => 12.4 <br/> +1.1e2 => 110 | same as original |
+| bool | true => true <br/> false => false | true => 1 <br/> false => 0 | true => 1 <br/> false => 0 | true => 1 <br/> false => 0 | true => "true" <br/> false => "false" |
+| object | true | 0 | 0 | 0 | original json |
+| array | empty array => false <br/> nonempty array => true | [] => 0 <br/> [1,2] => 1 | [] => 0 <br/> [1,2] => 1 | [] => 0 <br/> [1,2] => 1 | original json |
\ No newline at end of file
diff --git a/vendor/github.com/json-iterator/go/go.mod b/vendor/github.com/json-iterator/go/go.mod
new file mode 100644
index 0000000..e05c42f
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/go.mod
@@ -0,0 +1,11 @@
+module github.com/json-iterator/go
+
+go 1.12
+
+require (
+ github.com/davecgh/go-spew v1.1.1
+ github.com/google/gofuzz v1.0.0
+ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421
+ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742
+ github.com/stretchr/testify v1.3.0
+)
diff --git a/vendor/github.com/json-iterator/go/go.sum b/vendor/github.com/json-iterator/go/go.sum
new file mode 100644
index 0000000..d778b5a
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/go.sum
@@ -0,0 +1,14 @@
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
diff --git a/vendor/github.com/json-iterator/go/iter.go b/vendor/github.com/json-iterator/go/iter.go
new file mode 100644
index 0000000..29b31cf
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter.go
@@ -0,0 +1,349 @@
+package jsoniter
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+)
+
+// ValueType is the type of a JSON element
+type ValueType int
+
+const (
+ // InvalidValue invalid JSON element
+ InvalidValue ValueType = iota
+ // StringValue JSON element "string"
+ StringValue
+ // NumberValue JSON element 100 or 0.10
+ NumberValue
+ // NilValue JSON element null
+ NilValue
+ // BoolValue JSON element true or false
+ BoolValue
+ // ArrayValue JSON element []
+ ArrayValue
+ // ObjectValue JSON element {}
+ ObjectValue
+)
+
+var hexDigits []byte
+var valueTypes []ValueType
+
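+// init builds two 256-entry lookup tables: hexDigits maps an ASCII hex
+// character to its value (255 marks invalid input), and valueTypes maps
+// the first byte of a JSON element to the ValueType it starts.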
+func init() {
+ hexDigits = make([]byte, 256)
+ for i := 0; i < len(hexDigits); i++ {
+ hexDigits[i] = 255
+ }
+ for i := '0'; i <= '9'; i++ {
+ hexDigits[i] = byte(i - '0')
+ }
+ for i := 'a'; i <= 'f'; i++ {
+ hexDigits[i] = byte((i - 'a') + 10)
+ }
+ for i := 'A'; i <= 'F'; i++ {
+ hexDigits[i] = byte((i - 'A') + 10)
+ }
+ valueTypes = make([]ValueType, 256)
+ for i := 0; i < len(valueTypes); i++ {
+ valueTypes[i] = InvalidValue
+ }
+ valueTypes['"'] = StringValue
+ valueTypes['-'] = NumberValue
+ valueTypes['0'] = NumberValue
+ valueTypes['1'] = NumberValue
+ valueTypes['2'] = NumberValue
+ valueTypes['3'] = NumberValue
+ valueTypes['4'] = NumberValue
+ valueTypes['5'] = NumberValue
+ valueTypes['6'] = NumberValue
+ valueTypes['7'] = NumberValue
+ valueTypes['8'] = NumberValue
+ valueTypes['9'] = NumberValue
+ valueTypes['t'] = BoolValue
+ valueTypes['f'] = BoolValue
+ valueTypes['n'] = NilValue
+ valueTypes['['] = ArrayValue
+ valueTypes['{'] = ObjectValue
+}
+
+// Iterator is an io.Reader-like object with JSON-specific read functions.
+// Errors are not returned as return values, but stored in the Error field of the iterator instance.
+type Iterator struct {
+ cfg *frozenConfig
+ reader io.Reader
+ buf []byte
+ head int
+ tail int
+ depth int
+ captureStartedAt int
+ captured []byte
+ Error error
+ Attachment interface{} // open for customized decoder
+}
+
+// NewIterator creates an empty Iterator instance
+func NewIterator(cfg API) *Iterator {
+ return &Iterator{
+ cfg: cfg.(*frozenConfig),
+ reader: nil,
+ buf: nil,
+ head: 0,
+ tail: 0,
+ depth: 0,
+ }
+}
+
+// Parse creates an Iterator instance from an io.Reader
+func Parse(cfg API, reader io.Reader, bufSize int) *Iterator {
+ return &Iterator{
+ cfg: cfg.(*frozenConfig),
+ reader: reader,
+ buf: make([]byte, bufSize),
+ head: 0,
+ tail: 0,
+ depth: 0,
+ }
+}
+
+// ParseBytes creates an Iterator instance from a byte array
+func ParseBytes(cfg API, input []byte) *Iterator {
+ return &Iterator{
+ cfg: cfg.(*frozenConfig),
+ reader: nil,
+ buf: input,
+ head: 0,
+ tail: len(input),
+ depth: 0,
+ }
+}
+
+// ParseString creates an Iterator instance from a string
+func ParseString(cfg API, input string) *Iterator {
+ return ParseBytes(cfg, []byte(input))
+}
+
+// Pool returns a pool that can provide more iterators with the same configuration
+func (iter *Iterator) Pool() IteratorPool {
+ return iter.cfg
+}
+
+// Reset reuses the iterator instance by specifying another reader
+func (iter *Iterator) Reset(reader io.Reader) *Iterator {
+ iter.reader = reader
+ iter.head = 0
+ iter.tail = 0
+ iter.depth = 0
+ return iter
+}
+
+// ResetBytes reuses the iterator instance by specifying another byte array as input
+func (iter *Iterator) ResetBytes(input []byte) *Iterator {
+ iter.reader = nil
+ iter.buf = input
+ iter.head = 0
+ iter.tail = len(input)
+ iter.depth = 0
+ return iter
+}
+
+// WhatIsNext gets the ValueType of the next JSON element without consuming it
+func (iter *Iterator) WhatIsNext() ValueType {
+ valueType := valueTypes[iter.nextToken()]
+ iter.unreadByte()
+ return valueType
+}
+
+func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool {
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ switch c {
+ case ' ', '\n', '\t', '\r':
+ continue
+ }
+ iter.head = i
+ return false
+ }
+ return true
+}
+
+func (iter *Iterator) isObjectEnd() bool {
+ c := iter.nextToken()
+ if c == ',' {
+ return false
+ }
+ if c == '}' {
+ return true
+ }
+ iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c}))
+ return true
+}
+
+func (iter *Iterator) nextToken() byte {
+ // a variation of skip whitespaces, returning the next non-whitespace token
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ switch c {
+ case ' ', '\n', '\t', '\r':
+ continue
+ }
+ iter.head = i + 1
+ return c
+ }
+ if !iter.loadMore() {
+ return 0
+ }
+ }
+}
+
+// ReportError records an error on the iterator instance, including the current position.
+func (iter *Iterator) ReportError(operation string, msg string) {
+ if iter.Error != nil {
+ if iter.Error != io.EOF {
+ return
+ }
+ }
+ peekStart := iter.head - 10
+ if peekStart < 0 {
+ peekStart = 0
+ }
+ peekEnd := iter.head + 10
+ if peekEnd > iter.tail {
+ peekEnd = iter.tail
+ }
+ parsing := string(iter.buf[peekStart:peekEnd])
+ contextStart := iter.head - 50
+ if contextStart < 0 {
+ contextStart = 0
+ }
+ contextEnd := iter.head + 50
+ if contextEnd > iter.tail {
+ contextEnd = iter.tail
+ }
+ context := string(iter.buf[contextStart:contextEnd])
+ iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...",
+ operation, msg, iter.head-peekStart, parsing, context)
+}
+
+// CurrentBuffer gets the current buffer as a string for debugging purposes
+func (iter *Iterator) CurrentBuffer() string {
+ peekStart := iter.head - 10
+ if peekStart < 0 {
+ peekStart = 0
+ }
+ return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head,
+ string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail]))
+}
+
+func (iter *Iterator) readByte() (ret byte) {
+ if iter.head == iter.tail {
+ if iter.loadMore() {
+ ret = iter.buf[iter.head]
+ iter.head++
+ return ret
+ }
+ return 0
+ }
+ ret = iter.buf[iter.head]
+ iter.head++
+ return ret
+}
+
+func (iter *Iterator) loadMore() bool {
+ if iter.reader == nil {
+ if iter.Error == nil {
+ iter.head = iter.tail
+ iter.Error = io.EOF
+ }
+ return false
+ }
+ if iter.captured != nil {
+ iter.captured = append(iter.captured,
+ iter.buf[iter.captureStartedAt:iter.tail]...)
+ iter.captureStartedAt = 0
+ }
+ for {
+ n, err := iter.reader.Read(iter.buf)
+ if n == 0 {
+ if err != nil {
+ if iter.Error == nil {
+ iter.Error = err
+ }
+ return false
+ }
+ } else {
+ iter.head = 0
+ iter.tail = n
+ return true
+ }
+ }
+}
+
+func (iter *Iterator) unreadByte() {
+ if iter.Error != nil {
+ return
+ }
+ iter.head--
+ return
+}
+
+// Read reads the next JSON element as a generic interface{}.
+func (iter *Iterator) Read() interface{} {
+ valueType := iter.WhatIsNext()
+ switch valueType {
+ case StringValue:
+ return iter.ReadString()
+ case NumberValue:
+ if iter.cfg.configBeforeFrozen.UseNumber {
+ return json.Number(iter.readNumberAsString())
+ }
+ return iter.ReadFloat64()
+ case NilValue:
+ iter.skipFourBytes('n', 'u', 'l', 'l')
+ return nil
+ case BoolValue:
+ return iter.ReadBool()
+ case ArrayValue:
+ arr := []interface{}{}
+ iter.ReadArrayCB(func(iter *Iterator) bool {
+ var elem interface{}
+ iter.ReadVal(&elem)
+ arr = append(arr, elem)
+ return true
+ })
+ return arr
+ case ObjectValue:
+ obj := map[string]interface{}{}
+ iter.ReadMapCB(func(Iter *Iterator, field string) bool {
+ var elem interface{}
+ iter.ReadVal(&elem)
+ obj[field] = elem
+ return true
+ })
+ return obj
+ default:
+ iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType))
+ return nil
+ }
+}
+
+// limit maximum depth of nesting, as allowed by https://tools.ietf.org/html/rfc7159#section-9
+const maxDepth = 10000
+
+func (iter *Iterator) incrementDepth() (success bool) {
+ iter.depth++
+ if iter.depth <= maxDepth {
+ return true
+ }
+ iter.ReportError("incrementDepth", "exceeded max depth")
+ return false
+}
+
+func (iter *Iterator) decrementDepth() (success bool) {
+ iter.depth--
+ if iter.depth >= 0 {
+ return true
+ }
+ iter.ReportError("decrementDepth", "unexpected negative nesting")
+ return false
+}
diff --git a/vendor/github.com/json-iterator/go/iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go
new file mode 100644
index 0000000..204fe0e
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_array.go
@@ -0,0 +1,64 @@
+package jsoniter
+
+// ReadArray reads one array element and tells whether the array has more elements to read.
+func (iter *Iterator) ReadArray() (ret bool) {
+ c := iter.nextToken()
+ switch c {
+ case 'n':
+ iter.skipThreeBytes('u', 'l', 'l')
+ return false // null
+ case '[':
+ c = iter.nextToken()
+ if c != ']' {
+ iter.unreadByte()
+ return true
+ }
+ return false
+ case ']':
+ return false
+ case ',':
+ return true
+ default:
+ iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c}))
+ return
+ }
+}
+
+// ReadArrayCB reads an array, invoking the callback for each element
+func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) {
+ c := iter.nextToken()
+ if c == '[' {
+ if !iter.incrementDepth() {
+ return false
+ }
+ c = iter.nextToken()
+ if c != ']' {
+ iter.unreadByte()
+ if !callback(iter) {
+ iter.decrementDepth()
+ return false
+ }
+ c = iter.nextToken()
+ for c == ',' {
+ if !callback(iter) {
+ iter.decrementDepth()
+ return false
+ }
+ c = iter.nextToken()
+ }
+ if c != ']' {
+ iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c}))
+ iter.decrementDepth()
+ return false
+ }
+ return iter.decrementDepth()
+ }
+ return iter.decrementDepth()
+ }
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ return true // null
+ }
+ iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c}))
+ return false
+}
diff --git a/vendor/github.com/json-iterator/go/iter_float.go b/vendor/github.com/json-iterator/go/iter_float.go
new file mode 100644
index 0000000..b975463
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_float.go
@@ -0,0 +1,339 @@
+package jsoniter
+
+import (
+ "encoding/json"
+ "io"
+ "math/big"
+ "strconv"
+ "strings"
+ "unsafe"
+)
+
+var floatDigits []int8
+
+const invalidCharForNumber = int8(-1)
+const endOfNumber = int8(-2)
+const dotInNumber = int8(-3)
+
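+// init classifies every byte for the number fast path: digits map to
+// their values, delimiters that may end a number map to endOfNumber,
+// '.' maps to dotInNumber, and everything else is invalidCharForNumber.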
+func init() {
+ floatDigits = make([]int8, 256)
+ for i := 0; i < len(floatDigits); i++ {
+ floatDigits[i] = invalidCharForNumber
+ }
+ for i := int8('0'); i <= int8('9'); i++ {
+ floatDigits[i] = i - int8('0')
+ }
+ floatDigits[','] = endOfNumber
+ floatDigits[']'] = endOfNumber
+ floatDigits['}'] = endOfNumber
+ floatDigits[' '] = endOfNumber
+ floatDigits['\t'] = endOfNumber
+ floatDigits['\n'] = endOfNumber
+ floatDigits['.'] = dotInNumber
+}
+
+// ReadBigFloat reads a big.Float
+func (iter *Iterator) ReadBigFloat() (ret *big.Float) {
+ str := iter.readNumberAsString()
+ if iter.Error != nil && iter.Error != io.EOF {
+ return nil
+ }
+ prec := 64
+ if len(str) > prec {
+ prec = len(str)
+ }
+ val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero)
+ if err != nil {
+ iter.Error = err
+ return nil
+ }
+ return val
+}
+
+// ReadBigInt reads a big.Int
+func (iter *Iterator) ReadBigInt() (ret *big.Int) {
+ str := iter.readNumberAsString()
+ if iter.Error != nil && iter.Error != io.EOF {
+ return nil
+ }
+ ret = big.NewInt(0)
+ var success bool
+ ret, success = ret.SetString(str, 10)
+ if !success {
+ iter.ReportError("ReadBigInt", "invalid big int")
+ return nil
+ }
+ return ret
+}
+
+// ReadFloat32 reads a float32
+func (iter *Iterator) ReadFloat32() (ret float32) {
+ c := iter.nextToken()
+ if c == '-' {
+ return -iter.readPositiveFloat32()
+ }
+ iter.unreadByte()
+ return iter.readPositiveFloat32()
+}
+
+func (iter *Iterator) readPositiveFloat32() (ret float32) {
+ i := iter.head
+ // first char
+ if i == iter.tail {
+ return iter.readFloat32SlowPath()
+ }
+ c := iter.buf[i]
+ i++
+ ind := floatDigits[c]
+ switch ind {
+ case invalidCharForNumber:
+ return iter.readFloat32SlowPath()
+ case endOfNumber:
+ iter.ReportError("readFloat32", "empty number")
+ return
+ case dotInNumber:
+ iter.ReportError("readFloat32", "leading dot is invalid")
+ return
+ case 0:
+ if i == iter.tail {
+ return iter.readFloat32SlowPath()
+ }
+ c = iter.buf[i]
+ switch c {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ iter.ReportError("readFloat32", "leading zero is invalid")
+ return
+ }
+ }
+ value := uint64(ind)
+ // chars before dot
+non_decimal_loop:
+ for ; i < iter.tail; i++ {
+ c = iter.buf[i]
+ ind := floatDigits[c]
+ switch ind {
+ case invalidCharForNumber:
+ return iter.readFloat32SlowPath()
+ case endOfNumber:
+ iter.head = i
+ return float32(value)
+ case dotInNumber:
+ break non_decimal_loop
+ }
+ if value > uint64SafeToMultiple10 {
+ return iter.readFloat32SlowPath()
+ }
+ value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind;
+ }
+ // chars after dot
+ if c == '.' {
+ i++
+ decimalPlaces := 0
+ if i == iter.tail {
+ return iter.readFloat32SlowPath()
+ }
+ for ; i < iter.tail; i++ {
+ c = iter.buf[i]
+ ind := floatDigits[c]
+ switch ind {
+ case endOfNumber:
+ if decimalPlaces > 0 && decimalPlaces < len(pow10) {
+ iter.head = i
+ return float32(float64(value) / float64(pow10[decimalPlaces]))
+ }
+ // too many decimal places
+ return iter.readFloat32SlowPath()
+ case invalidCharForNumber, dotInNumber:
+ return iter.readFloat32SlowPath()
+ }
+ decimalPlaces++
+ if value > uint64SafeToMultiple10 {
+ return iter.readFloat32SlowPath()
+ }
+ value = (value << 3) + (value << 1) + uint64(ind)
+ }
+ }
+ return iter.readFloat32SlowPath()
+}
+
+func (iter *Iterator) readNumberAsString() (ret string) {
+ strBuf := [16]byte{}
+ str := strBuf[0:0]
+load_loop:
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ switch c {
+ case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ str = append(str, c)
+ continue
+ default:
+ iter.head = i
+ break load_loop
+ }
+ }
+ if !iter.loadMore() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ return
+ }
+ if len(str) == 0 {
+ iter.ReportError("readNumberAsString", "invalid number")
+ }
+ return *(*string)(unsafe.Pointer(&str))
+}
+
+func (iter *Iterator) readFloat32SlowPath() (ret float32) {
+ str := iter.readNumberAsString()
+ if iter.Error != nil && iter.Error != io.EOF {
+ return
+ }
+ errMsg := validateFloat(str)
+ if errMsg != "" {
+ iter.ReportError("readFloat32SlowPath", errMsg)
+ return
+ }
+ val, err := strconv.ParseFloat(str, 32)
+ if err != nil {
+ iter.Error = err
+ return
+ }
+ return float32(val)
+}
+
+// ReadFloat64 reads a float64
+func (iter *Iterator) ReadFloat64() (ret float64) {
+ c := iter.nextToken()
+ if c == '-' {
+ return -iter.readPositiveFloat64()
+ }
+ iter.unreadByte()
+ return iter.readPositiveFloat64()
+}
+
+func (iter *Iterator) readPositiveFloat64() (ret float64) {
+ i := iter.head
+ // first char
+ if i == iter.tail {
+ return iter.readFloat64SlowPath()
+ }
+ c := iter.buf[i]
+ i++
+ ind := floatDigits[c]
+ switch ind {
+ case invalidCharForNumber:
+ return iter.readFloat64SlowPath()
+ case endOfNumber:
+ iter.ReportError("readFloat64", "empty number")
+ return
+ case dotInNumber:
+ iter.ReportError("readFloat64", "leading dot is invalid")
+ return
+ case 0:
+ if i == iter.tail {
+ return iter.readFloat64SlowPath()
+ }
+ c = iter.buf[i]
+ switch c {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ iter.ReportError("readFloat64", "leading zero is invalid")
+ return
+ }
+ }
+ value := uint64(ind)
+ // chars before dot
+non_decimal_loop:
+ for ; i < iter.tail; i++ {
+ c = iter.buf[i]
+ ind := floatDigits[c]
+ switch ind {
+ case invalidCharForNumber:
+ return iter.readFloat64SlowPath()
+ case endOfNumber:
+ iter.head = i
+ return float64(value)
+ case dotInNumber:
+ break non_decimal_loop
+ }
+ if value > uint64SafeToMultiple10 {
+ return iter.readFloat64SlowPath()
+ }
+ value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind;
+ }
+ // chars after dot
+ if c == '.' {
+ i++
+ decimalPlaces := 0
+ if i == iter.tail {
+ return iter.readFloat64SlowPath()
+ }
+ for ; i < iter.tail; i++ {
+ c = iter.buf[i]
+ ind := floatDigits[c]
+ switch ind {
+ case endOfNumber:
+ if decimalPlaces > 0 && decimalPlaces < len(pow10) {
+ iter.head = i
+ return float64(value) / float64(pow10[decimalPlaces])
+ }
+ // too many decimal places
+ return iter.readFloat64SlowPath()
+ case invalidCharForNumber, dotInNumber:
+ return iter.readFloat64SlowPath()
+ }
+ decimalPlaces++
+ if value > uint64SafeToMultiple10 {
+ return iter.readFloat64SlowPath()
+ }
+ value = (value << 3) + (value << 1) + uint64(ind)
+ }
+ }
+ return iter.readFloat64SlowPath()
+}
+
+func (iter *Iterator) readFloat64SlowPath() (ret float64) {
+ str := iter.readNumberAsString()
+ if iter.Error != nil && iter.Error != io.EOF {
+ return
+ }
+ errMsg := validateFloat(str)
+ if errMsg != "" {
+ iter.ReportError("readFloat64SlowPath", errMsg)
+ return
+ }
+ val, err := strconv.ParseFloat(str, 64)
+ if err != nil {
+ iter.Error = err
+ return
+ }
+ return val
+}
+
+func validateFloat(str string) string {
+ // strconv.ParseFloat does not reject `1.` or `1.e1`
+ if len(str) == 0 {
+ return "empty number"
+ }
+ if str[0] == '-' {
+ return "-- is not valid"
+ }
+ dotPos := strings.IndexByte(str, '.')
+ if dotPos != -1 {
+ if dotPos == len(str)-1 {
+ return "dot can not be last character"
+ }
+ switch str[dotPos+1] {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ default:
+ return "missing digit after dot"
+ }
+ }
+ return ""
+}
+
+// ReadNumber reads a json.Number
+func (iter *Iterator) ReadNumber() (ret json.Number) {
+ return json.Number(iter.readNumberAsString())
+}
diff --git a/vendor/github.com/json-iterator/go/iter_int.go b/vendor/github.com/json-iterator/go/iter_int.go
new file mode 100644
index 0000000..2142320
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_int.go
@@ -0,0 +1,345 @@
+package jsoniter
+
+import (
+ "math"
+ "strconv"
+)
+
+var intDigits []int8
+
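+// the largest values that can still be multiplied by 10 without
+// overflowing, leaving room to add one more digit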
+const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1
+const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1
+
+func init() {
+ intDigits = make([]int8, 256)
+ for i := 0; i < len(intDigits); i++ {
+ intDigits[i] = invalidCharForNumber
+ }
+ for i := int8('0'); i <= int8('9'); i++ {
+ intDigits[i] = i - int8('0')
+ }
+}
+
+// ReadUint reads a uint
+func (iter *Iterator) ReadUint() uint {
+ if strconv.IntSize == 32 {
+ return uint(iter.ReadUint32())
+ }
+ return uint(iter.ReadUint64())
+}
+
+// ReadInt reads an int
+func (iter *Iterator) ReadInt() int {
+ if strconv.IntSize == 32 {
+ return int(iter.ReadInt32())
+ }
+ return int(iter.ReadInt64())
+}
+
+// ReadInt8 reads an int8
+func (iter *Iterator) ReadInt8() (ret int8) {
+ c := iter.nextToken()
+ if c == '-' {
+ val := iter.readUint32(iter.readByte())
+ if val > math.MaxInt8+1 {
+ iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return -int8(val)
+ }
+ val := iter.readUint32(c)
+ if val > math.MaxInt8 {
+ iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return int8(val)
+}
+
+// ReadUint8 reads a uint8
+func (iter *Iterator) ReadUint8() (ret uint8) {
+ val := iter.readUint32(iter.nextToken())
+ if val > math.MaxUint8 {
+ iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return uint8(val)
+}
+
+// ReadInt16 reads an int16
+func (iter *Iterator) ReadInt16() (ret int16) {
+ c := iter.nextToken()
+ if c == '-' {
+ val := iter.readUint32(iter.readByte())
+ if val > math.MaxInt16+1 {
+ iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return -int16(val)
+ }
+ val := iter.readUint32(c)
+ if val > math.MaxInt16 {
+ iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return int16(val)
+}
+
+// ReadUint16 reads a uint16
+func (iter *Iterator) ReadUint16() (ret uint16) {
+ val := iter.readUint32(iter.nextToken())
+ if val > math.MaxUint16 {
+ iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return uint16(val)
+}
+
+// ReadInt32 reads an int32
+func (iter *Iterator) ReadInt32() (ret int32) {
+ c := iter.nextToken()
+ if c == '-' {
+ val := iter.readUint32(iter.readByte())
+ if val > math.MaxInt32+1 {
+ iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return -int32(val)
+ }
+ val := iter.readUint32(c)
+ if val > math.MaxInt32 {
+ iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10))
+ return
+ }
+ return int32(val)
+}
+
+// ReadUint32 reads a uint32
+func (iter *Iterator) ReadUint32() (ret uint32) {
+ return iter.readUint32(iter.nextToken())
+}
+
+func (iter *Iterator) readUint32(c byte) (ret uint32) {
+ ind := intDigits[c]
+ if ind == 0 {
+ iter.assertInteger()
+ return 0 // single zero
+ }
+ if ind == invalidCharForNumber {
+ iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)}))
+ return
+ }
+ value := uint32(ind)
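+ // fast path: enough bytes are buffered to read several digits with a
+ // manually unrolled loop, avoiding the reload check of the loop below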
+ if iter.tail-iter.head > 10 {
+ i := iter.head
+ ind2 := intDigits[iter.buf[i]]
+ if ind2 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value
+ }
+ i++
+ ind3 := intDigits[iter.buf[i]]
+ if ind3 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*10 + uint32(ind2)
+ }
+ //iter.head = i + 1
+ //value = value * 100 + uint32(ind2) * 10 + uint32(ind3)
+ i++
+ ind4 := intDigits[iter.buf[i]]
+ if ind4 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*100 + uint32(ind2)*10 + uint32(ind3)
+ }
+ i++
+ ind5 := intDigits[iter.buf[i]]
+ if ind5 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4)
+ }
+ i++
+ ind6 := intDigits[iter.buf[i]]
+ if ind6 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5)
+ }
+ i++
+ ind7 := intDigits[iter.buf[i]]
+ if ind7 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6)
+ }
+ i++
+ ind8 := intDigits[iter.buf[i]]
+ if ind8 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7)
+ }
+ i++
+ ind9 := intDigits[iter.buf[i]]
+ value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8)
+ iter.head = i
+ if ind9 == invalidCharForNumber {
+ iter.assertInteger()
+ return value
+ }
+ }
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ ind = intDigits[iter.buf[i]]
+ if ind == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value
+ }
+ if value > uint32SafeToMultiply10 {
+ value2 := (value << 3) + (value << 1) + uint32(ind)
+ if value2 < value {
+ iter.ReportError("readUint32", "overflow")
+ return
+ }
+ value = value2
+ continue
+ }
+ value = (value << 3) + (value << 1) + uint32(ind)
+ }
+ if !iter.loadMore() {
+ iter.assertInteger()
+ return value
+ }
+ }
+}
+
+// ReadInt64 reads an int64.
+func (iter *Iterator) ReadInt64() (ret int64) {
+ c := iter.nextToken()
+ if c == '-' {
+ val := iter.readUint64(iter.readByte())
+ if val > math.MaxInt64+1 {
+ iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10))
+ return
+ }
+ return -int64(val)
+ }
+ val := iter.readUint64(c)
+ if val > math.MaxInt64 {
+ iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10))
+ return
+ }
+ return int64(val)
+}
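+
+// A sketch of the bounds handling (hypothetical inputs, not part of the
+// upstream source): the signed readers accept the full two's-complement
+// range, so the most negative value parses while its positive twin overflows:
+//
+//	iter := ParseString(ConfigDefault, "-9223372036854775808")
+//	v := iter.ReadInt64() // v == math.MinInt64, iter.Error == nil
+//
+//	iter = ParseString(ConfigDefault, "9223372036854775808")
+//	_ = iter.ReadInt64() // reports "ReadInt64: overflow"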
+
+// ReadUint64 reads a uint64.
+func (iter *Iterator) ReadUint64() uint64 {
+ return iter.readUint64(iter.nextToken())
+}
+
+func (iter *Iterator) readUint64(c byte) (ret uint64) {
+ ind := intDigits[c]
+ if ind == 0 {
+ iter.assertInteger()
+ return 0 // single zero
+ }
+ if ind == invalidCharForNumber {
+ iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)}))
+ return
+ }
+ value := uint64(ind)
+ if iter.tail-iter.head > 10 {
+ i := iter.head
+ ind2 := intDigits[iter.buf[i]]
+ if ind2 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value
+ }
+ i++
+ ind3 := intDigits[iter.buf[i]]
+ if ind3 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*10 + uint64(ind2)
+ }
+ //iter.head = i + 1
+ //value = value * 100 + uint32(ind2) * 10 + uint32(ind3)
+ i++
+ ind4 := intDigits[iter.buf[i]]
+ if ind4 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*100 + uint64(ind2)*10 + uint64(ind3)
+ }
+ i++
+ ind5 := intDigits[iter.buf[i]]
+ if ind5 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4)
+ }
+ i++
+ ind6 := intDigits[iter.buf[i]]
+ if ind6 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5)
+ }
+ i++
+ ind7 := intDigits[iter.buf[i]]
+ if ind7 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6)
+ }
+ i++
+ ind8 := intDigits[iter.buf[i]]
+ if ind8 == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7)
+ }
+ i++
+ ind9 := intDigits[iter.buf[i]]
+ value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8)
+ iter.head = i
+ if ind9 == invalidCharForNumber {
+ iter.assertInteger()
+ return value
+ }
+ }
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ ind = intDigits[iter.buf[i]]
+ if ind == invalidCharForNumber {
+ iter.head = i
+ iter.assertInteger()
+ return value
+ }
+ if value > uint64SafeToMultiple10 {
+ value2 := (value << 3) + (value << 1) + uint64(ind)
+ if value2 < value {
+ iter.ReportError("readUint64", "overflow")
+ return
+ }
+ value = value2
+ continue
+ }
+ value = (value << 3) + (value << 1) + uint64(ind)
+ }
+ if !iter.loadMore() {
+ iter.assertInteger()
+ return value
+ }
+ }
+}
+
+func (iter *Iterator) assertInteger() {
+ if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' {
+ iter.ReportError("assertInteger", "can not decode float as int")
+ }
+}
diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go
new file mode 100644
index 0000000..b651371
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_object.go
@@ -0,0 +1,267 @@
+package jsoniter
+
+import (
+ "fmt"
+ "strings"
+)
+
+// ReadObject reads one field from the object.
+// If the object has ended, it returns an empty string.
+// Otherwise, it returns the field name.
+func (iter *Iterator) ReadObject() (ret string) {
+ c := iter.nextToken()
+ switch c {
+ case 'n':
+ iter.skipThreeBytes('u', 'l', 'l')
+ return "" // null
+ case '{':
+ c = iter.nextToken()
+ if c == '"' {
+ iter.unreadByte()
+ field := iter.ReadString()
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+ }
+ return field
+ }
+ if c == '}' {
+ return "" // end of object
+ }
+ iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c}))
+ return
+ case ',':
+ field := iter.ReadString()
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+ }
+ return field
+ case '}':
+ return "" // end of object
+ default:
+ iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c})))
+ return
+ }
+}
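+
+// A sketch of typical usage (hypothetical input, assuming the package-level
+// ParseString helper): drain an object field by field until ReadObject
+// returns "":
+//
+//	iter := ParseString(ConfigDefault, `{"a":1,"b":2}`)
+//	for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
+//		fmt.Println(field, iter.ReadInt())
+//	}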
+
+// readFieldHash computes the FNV-1a hash of the next object field name,
+// folding ASCII upper case to lower case unless the config is case sensitive.
+func (iter *Iterator) readFieldHash() int64 {
+ hash := int64(0x811c9dc5)
+ c := iter.nextToken()
+ if c != '"' {
+ iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c}))
+ return 0
+ }
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ // require ascii string and no escape
+ b := iter.buf[i]
+ if b == '\\' {
+ iter.head = i
+ for _, b := range iter.readStringSlowPath() {
+ if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive {
+ b += 'a' - 'A'
+ }
+ hash ^= int64(b)
+ hash *= 0x1000193
+ }
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
+ return 0
+ }
+ return hash
+ }
+ if b == '"' {
+ iter.head = i + 1
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
+ return 0
+ }
+ return hash
+ }
+ if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive {
+ b += 'a' - 'A'
+ }
+ hash ^= int64(b)
+ hash *= 0x1000193
+ }
+ if !iter.loadMore() {
+ iter.ReportError("readFieldHash", `incomplete field name`)
+ return 0
+ }
+ }
+}
+
+func calcHash(str string, caseSensitive bool) int64 {
+ if !caseSensitive {
+ str = strings.ToLower(str)
+ }
+ hash := int64(0x811c9dc5)
+ for _, b := range []byte(str) {
+ hash ^= int64(b)
+ hash *= 0x1000193
+ }
+ return hash
+}
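+
+// A sketch of how the two hashes stay in sync (not part of the upstream
+// source): readFieldHash and calcHash share the 32-bit FNV-1a parameters
+// (offset basis 0x811c9dc5, prime 0x1000193), so a decoder can precompute
+// calcHash for each known field and compare it with readFieldHash at decode
+// time:
+//
+//	h := calcHash("Name", false) // case-insensitive
+//	// h equals readFieldHash's result for "name", "NAME", "NaMe", ...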
+
+// ReadObjectCB reads an object with a callback; the key is ASCII-only and the field name is not copied.
+func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
+ c := iter.nextToken()
+ var field string
+ if c == '{' {
+ if !iter.incrementDepth() {
+ return false
+ }
+ c = iter.nextToken()
+ if c == '"' {
+ iter.unreadByte()
+ field = iter.ReadString()
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+ }
+ if !callback(iter, field) {
+ iter.decrementDepth()
+ return false
+ }
+ c = iter.nextToken()
+ for c == ',' {
+ field = iter.ReadString()
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+ }
+ if !callback(iter, field) {
+ iter.decrementDepth()
+ return false
+ }
+ c = iter.nextToken()
+ }
+ if c != '}' {
+ iter.ReportError("ReadObjectCB", `object not ended with }`)
+ iter.decrementDepth()
+ return false
+ }
+ return iter.decrementDepth()
+ }
+ if c == '}' {
+ return iter.decrementDepth()
+ }
+ iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c}))
+ iter.decrementDepth()
+ return false
+ }
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ return true // null
+ }
+ iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c}))
+ return false
+}
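+
+// A sketch of typical usage (hypothetical input): the callback form avoids
+// per-field state, and returning false from the callback stops the iteration:
+//
+//	iter := ParseString(ConfigDefault, `{"a":1,"b":2}`)
+//	iter.ReadObjectCB(func(it *Iterator, field string) bool {
+//		fmt.Println(field, it.ReadInt())
+//		return true
+//	})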
+
+// ReadMapCB reads a map with a callback; the key can be any string.
+func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool {
+ c := iter.nextToken()
+ if c == '{' {
+ if !iter.incrementDepth() {
+ return false
+ }
+ c = iter.nextToken()
+ if c == '"' {
+ iter.unreadByte()
+ field := iter.ReadString()
+ if c = iter.nextToken(); c != ':' {
+ iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
+ iter.decrementDepth()
+ return false
+ }
+ if !callback(iter, field) {
+ iter.decrementDepth()
+ return false
+ }
+ c = iter.nextToken()
+ for c == ',' {
+ field = iter.ReadString()
+ if c = iter.nextToken(); c != ':' {
+ iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
+ iter.decrementDepth()
+ return false
+ }
+ if !callback(iter, field) {
+ iter.decrementDepth()
+ return false
+ }
+ c = iter.nextToken()
+ }
+ if c != '}' {
+ iter.ReportError("ReadMapCB", `object not ended with }`)
+ iter.decrementDepth()
+ return false
+ }
+ return iter.decrementDepth()
+ }
+ if c == '}' {
+ return iter.decrementDepth()
+ }
+ iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c}))
+ iter.decrementDepth()
+ return false
+ }
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ return true // null
+ }
+ iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c}))
+ return false
+}
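+
+// A sketch of typical usage (hypothetical input): the documented contract
+// allows arbitrary string keys, including escaped ones, which suits decoding
+// into map[string]T:
+//
+//	iter := ParseString(ConfigDefault, `{"k\u00e9y":true}`)
+//	iter.ReadMapCB(func(it *Iterator, key string) bool {
+//		fmt.Println(key, it.ReadBool()) // kéy true
+//		return true
+//	})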
+
+func (iter *Iterator) readObjectStart() bool {
+ c := iter.nextToken()
+ if c == '{' {
+ c = iter.nextToken()
+ if c == '}' {
+ return false
+ }
+ iter.unreadByte()
+ return true
+ } else if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ return false
+ }
+ iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c}))
+ return false
+}
+
+func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) {
+ str := iter.ReadStringAsSlice()
+ if iter.skipWhitespacesWithoutLoadMore() {
+ if ret == nil {
+ ret = make([]byte, len(str))
+ copy(ret, str)
+ }
+ if !iter.loadMore() {
+ return
+ }
+ }
+ if iter.buf[iter.head] != ':' {
+ iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]}))
+ return
+ }
+ iter.head++
+ if iter.skipWhitespacesWithoutLoadMore() {
+ if ret == nil {
+ ret = make([]byte, len(str))
+ copy(ret, str)
+ }
+ if !iter.loadMore() {
+ return
+ }
+ }
+ if ret == nil {
+ return str
+ }
+ return ret
+}
diff --git a/vendor/github.com/json-iterator/go/iter_skip.go b/vendor/github.com/json-iterator/go/iter_skip.go
new file mode 100644
index 0000000..e91eefb
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_skip.go
@@ -0,0 +1,130 @@
+package jsoniter
+
+import "fmt"
+
+// ReadNil reads the next JSON value as null and
+// reports whether it was null.
+func (iter *Iterator) ReadNil() (ret bool) {
+ c := iter.nextToken()
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l') // null
+ return true
+ }
+ iter.unreadByte()
+ return false
+}
+
+// ReadBool reads a JSON boolean value.
+func (iter *Iterator) ReadBool() (ret bool) {
+ c := iter.nextToken()
+ if c == 't' {
+ iter.skipThreeBytes('r', 'u', 'e')
+ return true
+ }
+ if c == 'f' {
+ iter.skipFourBytes('a', 'l', 's', 'e')
+ return false
+ }
+ iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c}))
+ return
+}
+
+// SkipAndReturnBytes skips the next JSON element and returns its content as []byte.
+// The returned []byte can be kept; it is a copy of the data.
+func (iter *Iterator) SkipAndReturnBytes() []byte {
+ iter.startCapture(iter.head)
+ iter.Skip()
+ return iter.stopCapture()
+}
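+
+// A sketch of typical usage (hypothetical input): capture one element
+// verbatim, for example to defer its decoding:
+//
+//	iter := ParseString(ConfigDefault, `[{"a":1},true]`)
+//	iter.ReadArray()
+//	raw := iter.SkipAndReturnBytes() // raw == []byte(`{"a":1}`)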
+
+// SkipAndAppendBytes skips the next JSON element and appends its content to
+// buf, returning the result.
+func (iter *Iterator) SkipAndAppendBytes(buf []byte) []byte {
+ iter.startCaptureTo(buf, iter.head)
+ iter.Skip()
+ return iter.stopCapture()
+}
+
+func (iter *Iterator) startCaptureTo(buf []byte, captureStartedAt int) {
+ if iter.captured != nil {
+ panic("already in capture mode")
+ }
+ iter.captureStartedAt = captureStartedAt
+ iter.captured = buf
+}
+
+func (iter *Iterator) startCapture(captureStartedAt int) {
+ iter.startCaptureTo(make([]byte, 0, 32), captureStartedAt)
+}
+
+func (iter *Iterator) stopCapture() []byte {
+ if iter.captured == nil {
+ panic("not in capture mode")
+ }
+ captured := iter.captured
+ remaining := iter.buf[iter.captureStartedAt:iter.head]
+ iter.captureStartedAt = -1
+ iter.captured = nil
+ return append(captured, remaining...)
+}
+
+// Skip skips one JSON value and positions the iterator at the start of the next one.
+func (iter *Iterator) Skip() {
+ c := iter.nextToken()
+ switch c {
+ case '"':
+ iter.skipString()
+ case 'n':
+ iter.skipThreeBytes('u', 'l', 'l') // null
+ case 't':
+ iter.skipThreeBytes('r', 'u', 'e') // true
+ case 'f':
+ iter.skipFourBytes('a', 'l', 's', 'e') // false
+ case '0':
+ iter.unreadByte()
+ iter.ReadFloat32()
+ case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ iter.skipNumber()
+ case '[':
+ iter.skipArray()
+ case '{':
+ iter.skipObject()
+ default:
+ iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c))
+ return
+ }
+}
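+
+// A sketch of typical usage (hypothetical input): Skip is how a decoder
+// discards unknown fields without materializing them:
+//
+//	iter := ParseString(ConfigDefault, `{"keep":1,"junk":[1,2,{"x":3}]}`)
+//	for f := iter.ReadObject(); f != ""; f = iter.ReadObject() {
+//		if f == "keep" {
+//			fmt.Println(iter.ReadInt())
+//		} else {
+//			iter.Skip() // discards the whole nested array
+//		}
+//	}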
+
+func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) {
+ if iter.readByte() != b1 {
+ iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
+ return
+ }
+ if iter.readByte() != b2 {
+ iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
+ return
+ }
+ if iter.readByte() != b3 {
+ iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
+ return
+ }
+ if iter.readByte() != b4 {
+ iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
+ return
+ }
+}
+
+func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) {
+ if iter.readByte() != b1 {
+ iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
+ return
+ }
+ if iter.readByte() != b2 {
+ iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
+ return
+ }
+ if iter.readByte() != b3 {
+ iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
+ return
+ }
+}
diff --git a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go
new file mode 100644
index 0000000..9303de4
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go
@@ -0,0 +1,163 @@
+//+build jsoniter_sloppy
+
+package jsoniter
+
+// sloppy but faster implementation; does not validate the input JSON
+
+func (iter *Iterator) skipNumber() {
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ switch c {
+ case ' ', '\n', '\r', '\t', ',', '}', ']':
+ iter.head = i
+ return
+ }
+ }
+ if !iter.loadMore() {
+ return
+ }
+ }
+}
+
+func (iter *Iterator) skipArray() {
+ level := 1
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ switch iter.buf[i] {
+ case '"': // If inside string, skip it
+ iter.head = i + 1
+ iter.skipString()
+ i = iter.head - 1 // it will be i++ soon
+ case '[': // If open symbol, increase level
+ level++
+ if !iter.incrementDepth() {
+ return
+ }
+ case ']': // If close symbol, decrease level
+ level--
+ if !iter.decrementDepth() {
+ return
+ }
+
+ // If we have returned to the original level, we're done
+ if level == 0 {
+ iter.head = i + 1
+ return
+ }
+ }
+ }
+ if !iter.loadMore() {
+ iter.ReportError("skipObject", "incomplete array")
+ return
+ }
+ }
+}
+
+func (iter *Iterator) skipObject() {
+ level := 1
+ if !iter.incrementDepth() {
+ return
+ }
+
+ for {
+ for i := iter.head; i < iter.tail; i++ {
+ switch iter.buf[i] {
+ case '"': // If inside string, skip it
+ iter.head = i + 1
+ iter.skipString()
+ i = iter.head - 1 // it will be i++ soon
+ case '{': // If open symbol, increase level
+ level++
+ if !iter.incrementDepth() {
+ return
+ }
+ case '}': // If close symbol, decrease level
+ level--
+ if !iter.decrementDepth() {
+ return
+ }
+
+ // If we have returned to the original level, we're done
+ if level == 0 {
+ iter.head = i + 1
+ return
+ }
+ }
+ }
+ if !iter.loadMore() {
+ iter.ReportError("skipObject", "incomplete object")
+ return
+ }
+ }
+}
+
+func (iter *Iterator) skipString() {
+ for {
+ end, escaped := iter.findStringEnd()
+ if end == -1 {
+ if !iter.loadMore() {
+ iter.ReportError("skipString", "incomplete string")
+ return
+ }
+ if escaped {
+ iter.head = 1 // skip the first char as last char read is \
+ }
+ } else {
+ iter.head = end
+ return
+ }
+ }
+}
+
+// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go
+// findStringEnd tries to find the end of the string,
+// supporting strings that contain escaped quote symbols.
+func (iter *Iterator) findStringEnd() (int, bool) {
+ escaped := false
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ if c == '"' {
+ if !escaped {
+ return i + 1, false
+ }
+ j := i - 1
+ for {
+ if j < iter.head || iter.buf[j] != '\\' {
+ // even number of backslashes
+ // either end of buffer, or " found
+ return i + 1, true
+ }
+ j--
+ if j < iter.head || iter.buf[j] != '\\' {
+ // odd number of backslashes
+ // it is \" or \\\"
+ break
+ }
+ j--
+ }
+ } else if c == '\\' {
+ escaped = true
+ }
+ }
+ j := iter.tail - 1
+ for {
+ if j < iter.head || iter.buf[j] != '\\' {
+ // even number of backslashes
+ // either end of buffer, or " found
+ return -1, false // do not end with \
+ }
+ j--
+ if j < iter.head || iter.buf[j] != '\\' {
+ // odd number of backslashes
+ // it is \" or \\\"
+ break
+ }
+ j--
+
+ }
+ return -1, true // end with \
+}
diff --git a/vendor/github.com/json-iterator/go/iter_skip_strict.go b/vendor/github.com/json-iterator/go/iter_skip_strict.go
new file mode 100644
index 0000000..6cf66d0
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_skip_strict.go
@@ -0,0 +1,99 @@
+//+build !jsoniter_sloppy
+
+package jsoniter
+
+import (
+ "fmt"
+ "io"
+)
+
+func (iter *Iterator) skipNumber() {
+ if !iter.trySkipNumber() {
+ iter.unreadByte()
+ if iter.Error != nil && iter.Error != io.EOF {
+ return
+ }
+ iter.ReadFloat64()
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = nil
+ iter.ReadBigFloat()
+ }
+ }
+}
+
+func (iter *Iterator) trySkipNumber() bool {
+ dotFound := false
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ switch c {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ case '.':
+ if dotFound {
+ iter.ReportError("validateNumber", `more than one dot found in number`)
+ return true // already failed
+ }
+ if i+1 == iter.tail {
+ return false
+ }
+ c = iter.buf[i+1]
+ switch c {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ default:
+ iter.ReportError("validateNumber", `missing digit after dot`)
+ return true // already failed
+ }
+ dotFound = true
+ default:
+ switch c {
+ case ',', ']', '}', ' ', '\t', '\n', '\r':
+ if iter.head == i {
+ return false // if - without following digits
+ }
+ iter.head = i
+ return true // must be valid
+ }
+ return false // may be invalid
+ }
+ }
+ return false
+}
+
+func (iter *Iterator) skipString() {
+ if !iter.trySkipString() {
+ iter.unreadByte()
+ iter.ReadString()
+ }
+}
+
+func (iter *Iterator) trySkipString() bool {
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ if c == '"' {
+ iter.head = i + 1
+ return true // valid
+ } else if c == '\\' {
+ return false
+ } else if c < ' ' {
+ iter.ReportError("trySkipString",
+ fmt.Sprintf(`invalid control character found: %d`, c))
+ return true // already failed
+ }
+ }
+ return false
+}
+
+func (iter *Iterator) skipObject() {
+ iter.unreadByte()
+ iter.ReadObjectCB(func(iter *Iterator, field string) bool {
+ iter.Skip()
+ return true
+ })
+}
+
+func (iter *Iterator) skipArray() {
+ iter.unreadByte()
+ iter.ReadArrayCB(func(iter *Iterator) bool {
+ iter.Skip()
+ return true
+ })
+}
diff --git a/vendor/github.com/json-iterator/go/iter_str.go b/vendor/github.com/json-iterator/go/iter_str.go
new file mode 100644
index 0000000..adc487e
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_str.go
@@ -0,0 +1,215 @@
+package jsoniter
+
+import (
+ "fmt"
+ "unicode/utf16"
+)
+
+// ReadString reads a string from the iterator.
+func (iter *Iterator) ReadString() (ret string) {
+ c := iter.nextToken()
+ if c == '"' {
+ for i := iter.head; i < iter.tail; i++ {
+ c := iter.buf[i]
+ if c == '"' {
+ ret = string(iter.buf[iter.head:i])
+ iter.head = i + 1
+ return ret
+ } else if c == '\\' {
+ break
+ } else if c < ' ' {
+ iter.ReportError("ReadString",
+ fmt.Sprintf(`invalid control character found: %d`, c))
+ return
+ }
+ }
+ return iter.readStringSlowPath()
+ } else if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ return ""
+ }
+ iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c}))
+ return
+}
+
+func (iter *Iterator) readStringSlowPath() (ret string) {
+ var str []byte
+ var c byte
+ for iter.Error == nil {
+ c = iter.readByte()
+ if c == '"' {
+ return string(str)
+ }
+ if c == '\\' {
+ c = iter.readByte()
+ str = iter.readEscapedChar(c, str)
+ } else {
+ str = append(str, c)
+ }
+ }
+ iter.ReportError("readStringSlowPath", "unexpected end of input")
+ return
+}
+
+func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte {
+ switch c {
+ case 'u':
+ r := iter.readU4()
+ if utf16.IsSurrogate(r) {
+ c = iter.readByte()
+ if iter.Error != nil {
+ return nil
+ }
+ if c != '\\' {
+ iter.unreadByte()
+ str = appendRune(str, r)
+ return str
+ }
+ c = iter.readByte()
+ if iter.Error != nil {
+ return nil
+ }
+ if c != 'u' {
+ str = appendRune(str, r)
+ return iter.readEscapedChar(c, str)
+ }
+ r2 := iter.readU4()
+ if iter.Error != nil {
+ return nil
+ }
+ combined := utf16.DecodeRune(r, r2)
+ if combined == '\uFFFD' {
+ str = appendRune(str, r)
+ str = appendRune(str, r2)
+ } else {
+ str = appendRune(str, combined)
+ }
+ } else {
+ str = appendRune(str, r)
+ }
+ case '"':
+ str = append(str, '"')
+ case '\\':
+ str = append(str, '\\')
+ case '/':
+ str = append(str, '/')
+ case 'b':
+ str = append(str, '\b')
+ case 'f':
+ str = append(str, '\f')
+ case 'n':
+ str = append(str, '\n')
+ case 'r':
+ str = append(str, '\r')
+ case 't':
+ str = append(str, '\t')
+ default:
+ iter.ReportError("readEscapedChar",
+ `invalid escape char after \`)
+ return nil
+ }
+ return str
+}
+
+// ReadStringAsSlice reads a string from the iterator without copying it into string form.
+// The returned []byte must not be kept, as it will change after the next iterator call.
+func (iter *Iterator) ReadStringAsSlice() (ret []byte) {
+ c := iter.nextToken()
+ if c == '"' {
+ for i := iter.head; i < iter.tail; i++ {
+ // require ascii string and no escape
+ // for: field name, base64, number
+ if iter.buf[i] == '"' {
+ // fast path: reuse the underlying buffer
+ ret = iter.buf[iter.head:i]
+ iter.head = i + 1
+ return ret
+ }
+ }
+ readLen := iter.tail - iter.head
+ copied := make([]byte, readLen, readLen*2)
+ copy(copied, iter.buf[iter.head:iter.tail])
+ iter.head = iter.tail
+ for iter.Error == nil {
+ c := iter.readByte()
+ if c == '"' {
+ return copied
+ }
+ copied = append(copied, c)
+ }
+ return copied
+ }
+ iter.ReportError("ReadStringAsSlice", `expects " or n, but found `+string([]byte{c}))
+ return
+}
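+
+// A sketch of the aliasing caveat (hypothetical input): on the fast path the
+// returned slice aliases the iterator's buffer, so copy it before the next
+// read if it must outlive the call:
+//
+//	iter := ParseString(ConfigDefault, `"abc"`)
+//	b := iter.ReadStringAsSlice()
+//	keep := append([]byte(nil), b...) // safe copy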
+
+func (iter *Iterator) readU4() (ret rune) {
+ for i := 0; i < 4; i++ {
+ c := iter.readByte()
+ if iter.Error != nil {
+ return
+ }
+ if c >= '0' && c <= '9' {
+ ret = ret*16 + rune(c-'0')
+ } else if c >= 'a' && c <= 'f' {
+ ret = ret*16 + rune(c-'a'+10)
+ } else if c >= 'A' && c <= 'F' {
+ ret = ret*16 + rune(c-'A'+10)
+ } else {
+ iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c}))
+ return
+ }
+ }
+ return ret
+}
+
+const (
+ t1 = 0x00 // 0000 0000
+ tx = 0x80 // 1000 0000
+ t2 = 0xC0 // 1100 0000
+ t3 = 0xE0 // 1110 0000
+ t4 = 0xF0 // 1111 0000
+ t5 = 0xF8 // 1111 1000
+
+ maskx = 0x3F // 0011 1111
+ mask2 = 0x1F // 0001 1111
+ mask3 = 0x0F // 0000 1111
+ mask4 = 0x07 // 0000 0111
+
+ rune1Max = 1<<7 - 1
+ rune2Max = 1<<11 - 1
+ rune3Max = 1<<16 - 1
+
+ surrogateMin = 0xD800
+ surrogateMax = 0xDFFF
+
+ maxRune = '\U0010FFFF' // Maximum valid Unicode code point.
+ runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character"
+)
+
+func appendRune(p []byte, r rune) []byte {
+ // Negative values are erroneous. Making it unsigned addresses the problem.
+ switch i := uint32(r); {
+ case i <= rune1Max:
+ p = append(p, byte(r))
+ return p
+ case i <= rune2Max:
+ p = append(p, t2|byte(r>>6))
+ p = append(p, tx|byte(r)&maskx)
+ return p
+ case i > maxRune, surrogateMin <= i && i <= surrogateMax:
+ r = runeError
+ fallthrough
+ case i <= rune3Max:
+ p = append(p, t3|byte(r>>12))
+ p = append(p, tx|byte(r>>6)&maskx)
+ p = append(p, tx|byte(r)&maskx)
+ return p
+ default:
+ p = append(p, t4|byte(r>>18))
+ p = append(p, tx|byte(r>>12)&maskx)
+ p = append(p, tx|byte(r>>6)&maskx)
+ p = append(p, tx|byte(r)&maskx)
+ return p
+ }
+}
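+
+// A worked example (a sketch, not part of the upstream source): appendRune
+// performs the same UTF-8 encoding as the standard library, so
+// appendRune(nil, 'é') (U+00E9) yields []byte{0xC3, 0xA9}, and invalid or
+// surrogate runes are replaced with U+FFFD.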
diff --git a/vendor/github.com/json-iterator/go/jsoniter.go b/vendor/github.com/json-iterator/go/jsoniter.go
new file mode 100644
index 0000000..c2934f9
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/jsoniter.go
@@ -0,0 +1,18 @@
+// Package jsoniter implements encoding and decoding of JSON as defined in
+// RFC 4627, with interfaces syntactically identical to those of the standard
+// library's encoding/json package.
+// Converting from encoding/json to jsoniter requires no more than replacing
+// the package import and variable type declarations (if any);
+// the jsoniter interfaces give 100% compatibility with code using the standard library.
+//
+// "JSON and Go"
+// (https://golang.org/doc/articles/json_and_go.html)
+// gives a description of how Marshal/Unmarshal operate
+// between arbitrary or predefined json objects and bytes,
+// and it applies to jsoniter.Marshal/Unmarshal as well.
+//
+// In addition, jsoniter.Iterator provides a different set of interfaces
+// for iterating over given bytes/string/reader
+// and yielding parsed elements one by one.
+// This set of interfaces reads input only as required and gives
+// better performance.
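+//
+// A sketch of the drop-in replacement pattern (assuming the
+// ConfigCompatibleWithStandardLibrary API):
+//
+//	var json = jsoniter.ConfigCompatibleWithStandardLibrary
+//	data, err := json.Marshal(&struct{ Name string }{"gin"})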
+package jsoniter
diff --git a/vendor/github.com/json-iterator/go/pool.go b/vendor/github.com/json-iterator/go/pool.go
new file mode 100644
index 0000000..e2389b5
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/pool.go
@@ -0,0 +1,42 @@
+package jsoniter
+
+import (
+ "io"
+)
+
+// IteratorPool is a thread-safe pool of iterators sharing the same configuration.
+type IteratorPool interface {
+ BorrowIterator(data []byte) *Iterator
+ ReturnIterator(iter *Iterator)
+}
+
+// StreamPool is a thread-safe pool of streams sharing the same configuration.
+type StreamPool interface {
+ BorrowStream(writer io.Writer) *Stream
+ ReturnStream(stream *Stream)
+}
+
+func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream {
+ stream := cfg.streamPool.Get().(*Stream)
+ stream.Reset(writer)
+ return stream
+}
+
+func (cfg *frozenConfig) ReturnStream(stream *Stream) {
+ stream.out = nil
+ stream.Error = nil
+ stream.Attachment = nil
+ cfg.streamPool.Put(stream)
+}
+
+func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator {
+ iter := cfg.iteratorPool.Get().(*Iterator)
+ iter.ResetBytes(data)
+ return iter
+}
+
+func (cfg *frozenConfig) ReturnIterator(iter *Iterator) {
+ iter.Error = nil
+ iter.Attachment = nil
+ cfg.iteratorPool.Put(iter)
+}
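+
+// A sketch of typical pooled usage (hypothetical input, via the API exposed
+// by a frozen config such as ConfigDefault):
+//
+//	iter := ConfigDefault.BorrowIterator([]byte(`[1,2,3]`))
+//	defer ConfigDefault.ReturnIterator(iter)
+//	for iter.ReadArray() {
+//		fmt.Println(iter.ReadInt())
+//	}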
diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go
new file mode 100644
index 0000000..74974ba
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect.go
@@ -0,0 +1,337 @@
+package jsoniter
+
+import (
+ "fmt"
+ "reflect"
+ "unsafe"
+
+ "github.com/modern-go/reflect2"
+)
+
+// ValDecoder is an internal type registered to cache as needed.
+// Don't confuse jsoniter.ValDecoder with json.Decoder.
+// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link).
+//
+// Reflection on the type is used to create decoders, which are then cached.
+// Reflection on values is avoided where we can, as a reflect.Value itself will allocate, with the following exceptions:
+// 1. creating an instance of a new value, for example *int will need an int to be allocated
+// 2. appending to a slice, if the existing cap is not enough, allocation will be done using reflect.New
+// 3. assignment to a map, where both key and value will be reflect.Value
+// For a simple struct binding, decoding will be reflect.Value free and allocation free.
+type ValDecoder interface {
+ Decode(ptr unsafe.Pointer, iter *Iterator)
+}
+
+// ValEncoder is an internal type registered to cache as needed.
+// Don't confuse jsoniter.ValEncoder with json.Encoder.
+// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link).
+type ValEncoder interface {
+ IsEmpty(ptr unsafe.Pointer) bool
+ Encode(ptr unsafe.Pointer, stream *Stream)
+}
+
+type checkIsEmpty interface {
+ IsEmpty(ptr unsafe.Pointer) bool
+}
+
+type ctx struct {
+ *frozenConfig
+ prefix string
+ encoders map[reflect2.Type]ValEncoder
+ decoders map[reflect2.Type]ValDecoder
+}
+
+func (b *ctx) caseSensitive() bool {
+ if b.frozenConfig == nil {
+ // default is case-insensitive
+ return false
+ }
+ return b.frozenConfig.caseSensitive
+}
+
+func (b *ctx) append(prefix string) *ctx {
+ return &ctx{
+ frozenConfig: b.frozenConfig,
+ prefix: b.prefix + " " + prefix,
+ encoders: b.encoders,
+ decoders: b.decoders,
+ }
+}
+
+// ReadVal copies the underlying JSON into the Go value, same as json.Unmarshal.
+func (iter *Iterator) ReadVal(obj interface{}) {
+ depth := iter.depth
+ cacheKey := reflect2.RTypeOf(obj)
+ decoder := iter.cfg.getDecoderFromCache(cacheKey)
+ if decoder == nil {
+ typ := reflect2.TypeOf(obj)
+ if typ.Kind() != reflect.Ptr {
+ iter.ReportError("ReadVal", "can only unmarshal into pointer")
+ return
+ }
+ decoder = iter.cfg.DecoderOf(typ)
+ }
+ ptr := reflect2.PtrOf(obj)
+ if ptr == nil {
+ iter.ReportError("ReadVal", "can not read into nil pointer")
+ return
+ }
+ decoder.Decode(ptr, iter)
+ if iter.depth != depth {
+ iter.ReportError("ReadVal", "unexpected mismatched nesting")
+ return
+ }
+}
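+
+// A sketch of typical usage (hypothetical input): ReadVal mirrors
+// json.Unmarshal, but streams from the iterator's buffer:
+//
+//	var user struct {
+//		Name string `json:"name"`
+//	}
+//	iter := ParseString(ConfigDefault, `{"name":"gin"}`)
+//	iter.ReadVal(&user) // user.Name == "gin"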
+
+// WriteVal copies the Go value into the underlying JSON, same as json.Marshal.
+func (stream *Stream) WriteVal(val interface{}) {
+ if nil == val {
+ stream.WriteNil()
+ return
+ }
+ cacheKey := reflect2.RTypeOf(val)
+ encoder := stream.cfg.getEncoderFromCache(cacheKey)
+ if encoder == nil {
+ typ := reflect2.TypeOf(val)
+ encoder = stream.cfg.EncoderOf(typ)
+ }
+ encoder.Encode(reflect2.PtrOf(val), stream)
+}
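+
+// A sketch of typical usage (hypothetical output writer): WriteVal mirrors
+// json.Marshal on a Stream:
+//
+//	stream := ConfigDefault.BorrowStream(os.Stdout)
+//	defer ConfigDefault.ReturnStream(stream)
+//	stream.WriteVal(map[string]int{"a": 1})
+//	stream.Flush() // prints {"a":1}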
+
+func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder {
+ cacheKey := typ.RType()
+ decoder := cfg.getDecoderFromCache(cacheKey)
+ if decoder != nil {
+ return decoder
+ }
+ ctx := &ctx{
+ frozenConfig: cfg,
+ prefix: "",
+ decoders: map[reflect2.Type]ValDecoder{},
+ encoders: map[reflect2.Type]ValEncoder{},
+ }
+ ptrType := typ.(*reflect2.UnsafePtrType)
+ decoder = decoderOfType(ctx, ptrType.Elem())
+ cfg.addDecoderToCache(cacheKey, decoder)
+ return decoder
+}
+
+func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
+ decoder := getTypeDecoderFromExtension(ctx, typ)
+ if decoder != nil {
+ return decoder
+ }
+ decoder = createDecoderOfType(ctx, typ)
+ for _, extension := range extensions {
+ decoder = extension.DecorateDecoder(typ, decoder)
+ }
+ decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder)
+ for _, extension := range ctx.extraExtensions {
+ decoder = extension.DecorateDecoder(typ, decoder)
+ }
+ return decoder
+}
+
+func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
+ decoder := ctx.decoders[typ]
+ if decoder != nil {
+ return decoder
+ }
+ placeholder := &placeholderDecoder{}
+ ctx.decoders[typ] = placeholder
+ decoder = _createDecoderOfType(ctx, typ)
+ placeholder.decoder = decoder
+ return decoder
+}
+
+func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
+ decoder := createDecoderOfJsonRawMessage(ctx, typ)
+ if decoder != nil {
+ return decoder
+ }
+ decoder = createDecoderOfJsonNumber(ctx, typ)
+ if decoder != nil {
+ return decoder
+ }
+ decoder = createDecoderOfMarshaler(ctx, typ)
+ if decoder != nil {
+ return decoder
+ }
+ decoder = createDecoderOfAny(ctx, typ)
+ if decoder != nil {
+ return decoder
+ }
+ decoder = createDecoderOfNative(ctx, typ)
+ if decoder != nil {
+ return decoder
+ }
+ switch typ.Kind() {
+ case reflect.Interface:
+ ifaceType, isIFace := typ.(*reflect2.UnsafeIFaceType)
+ if isIFace {
+ return &ifaceDecoder{valType: ifaceType}
+ }
+ return &efaceDecoder{}
+ case reflect.Struct:
+ return decoderOfStruct(ctx, typ)
+ case reflect.Array:
+ return decoderOfArray(ctx, typ)
+ case reflect.Slice:
+ return decoderOfSlice(ctx, typ)
+ case reflect.Map:
+ return decoderOfMap(ctx, typ)
+ case reflect.Ptr:
+ return decoderOfOptional(ctx, typ)
+ default:
+ return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())}
+ }
+}
+
+func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder {
+ cacheKey := typ.RType()
+ encoder := cfg.getEncoderFromCache(cacheKey)
+ if encoder != nil {
+ return encoder
+ }
+ ctx := &ctx{
+ frozenConfig: cfg,
+ prefix: "",
+ decoders: map[reflect2.Type]ValDecoder{},
+ encoders: map[reflect2.Type]ValEncoder{},
+ }
+ encoder = encoderOfType(ctx, typ)
+ if typ.LikePtr() {
+ encoder = &onePtrEncoder{encoder}
+ }
+ cfg.addEncoderToCache(cacheKey, encoder)
+ return encoder
+}
+
+type onePtrEncoder struct {
+ encoder ValEncoder
+}
+
+func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr))
+}
+
+func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ encoder.encoder.Encode(unsafe.Pointer(&ptr), stream)
+}
+
+func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
+ encoder := getTypeEncoderFromExtension(ctx, typ)
+ if encoder != nil {
+ return encoder
+ }
+ encoder = createEncoderOfType(ctx, typ)
+ for _, extension := range extensions {
+ encoder = extension.DecorateEncoder(typ, encoder)
+ }
+ encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder)
+ for _, extension := range ctx.extraExtensions {
+ encoder = extension.DecorateEncoder(typ, encoder)
+ }
+ return encoder
+}
+
+func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
+ encoder := ctx.encoders[typ]
+ if encoder != nil {
+ return encoder
+ }
+ placeholder := &placeholderEncoder{}
+ ctx.encoders[typ] = placeholder
+ encoder = _createEncoderOfType(ctx, typ)
+ placeholder.encoder = encoder
+ return encoder
+}
+func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
+ encoder := createEncoderOfJsonRawMessage(ctx, typ)
+ if encoder != nil {
+ return encoder
+ }
+ encoder = createEncoderOfJsonNumber(ctx, typ)
+ if encoder != nil {
+ return encoder
+ }
+ encoder = createEncoderOfMarshaler(ctx, typ)
+ if encoder != nil {
+ return encoder
+ }
+ encoder = createEncoderOfAny(ctx, typ)
+ if encoder != nil {
+ return encoder
+ }
+ encoder = createEncoderOfNative(ctx, typ)
+ if encoder != nil {
+ return encoder
+ }
+ kind := typ.Kind()
+ switch kind {
+ case reflect.Interface:
+ return &dynamicEncoder{typ}
+ case reflect.Struct:
+ return encoderOfStruct(ctx, typ)
+ case reflect.Array:
+ return encoderOfArray(ctx, typ)
+ case reflect.Slice:
+ return encoderOfSlice(ctx, typ)
+ case reflect.Map:
+ return encoderOfMap(ctx, typ)
+ case reflect.Ptr:
+ return encoderOfOptional(ctx, typ)
+ default:
+ return &lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())}
+ }
+}
+
+type lazyErrorDecoder struct {
+ err error
+}
+
+func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if iter.WhatIsNext() != NilValue {
+ if iter.Error == nil {
+ iter.Error = decoder.err
+ }
+ } else {
+ iter.Skip()
+ }
+}
+
+type lazyErrorEncoder struct {
+ err error
+}
+
+func (encoder *lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if ptr == nil {
+ stream.WriteNil()
+ } else if stream.Error == nil {
+ stream.Error = encoder.err
+ }
+}
+
+func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return false
+}
+
+type placeholderDecoder struct {
+ decoder ValDecoder
+}
+
+func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ decoder.decoder.Decode(ptr, iter)
+}
+
+type placeholderEncoder struct {
+ encoder ValEncoder
+}
+
+func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ encoder.encoder.Encode(ptr, stream)
+}
+
+func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.encoder.IsEmpty(ptr)
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_array.go b/vendor/github.com/json-iterator/go/reflect_array.go
new file mode 100644
index 0000000..13a0b7b
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_array.go
@@ -0,0 +1,104 @@
+package jsoniter
+
+import (
+ "fmt"
+ "github.com/modern-go/reflect2"
+ "io"
+ "unsafe"
+)
+
+func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder {
+ arrayType := typ.(*reflect2.UnsafeArrayType)
+ decoder := decoderOfType(ctx.append("[arrayElem]"), arrayType.Elem())
+ return &arrayDecoder{arrayType, decoder}
+}
+
+func encoderOfArray(ctx *ctx, typ reflect2.Type) ValEncoder {
+ arrayType := typ.(*reflect2.UnsafeArrayType)
+ if arrayType.Len() == 0 {
+ return emptyArrayEncoder{}
+ }
+ encoder := encoderOfType(ctx.append("[arrayElem]"), arrayType.Elem())
+ return &arrayEncoder{arrayType, encoder}
+}
+
+type emptyArrayEncoder struct{}
+
+func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteEmptyArray()
+}
+
+func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return true
+}
+
+type arrayEncoder struct {
+ arrayType *reflect2.UnsafeArrayType
+ elemEncoder ValEncoder
+}
+
+func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteArrayStart()
+ elemPtr := unsafe.Pointer(ptr)
+ encoder.elemEncoder.Encode(elemPtr, stream)
+ for i := 1; i < encoder.arrayType.Len(); i++ {
+ stream.WriteMore()
+ elemPtr = encoder.arrayType.UnsafeGetIndex(ptr, i)
+ encoder.elemEncoder.Encode(elemPtr, stream)
+ }
+ stream.WriteArrayEnd()
+ if stream.Error != nil && stream.Error != io.EOF {
+ stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error())
+ }
+}
+
+func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return false
+}
+
+type arrayDecoder struct {
+ arrayType *reflect2.UnsafeArrayType
+ elemDecoder ValDecoder
+}
+
+func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ decoder.doDecode(ptr, iter)
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error())
+ }
+}
+
+func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
+ c := iter.nextToken()
+ arrayType := decoder.arrayType
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ return
+ }
+ if c != '[' {
+ iter.ReportError("decode array", "expect [ or n, but found "+string([]byte{c}))
+ return
+ }
+ c = iter.nextToken()
+ if c == ']' {
+ return
+ }
+ iter.unreadByte()
+ elemPtr := arrayType.UnsafeGetIndex(ptr, 0)
+ decoder.elemDecoder.Decode(elemPtr, iter)
+ length := 1
+ for c = iter.nextToken(); c == ','; c = iter.nextToken() {
+ if length >= arrayType.Len() {
+ iter.Skip()
+ continue
+ }
+ idx := length
+ length++
+ elemPtr = arrayType.UnsafeGetIndex(ptr, idx)
+ decoder.elemDecoder.Decode(elemPtr, iter)
+ }
+ if c != ']' {
+ iter.ReportError("decode array", "expect ], but found "+string([]byte{c}))
+ return
+ }
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_dynamic.go b/vendor/github.com/json-iterator/go/reflect_dynamic.go
new file mode 100644
index 0000000..8b6bc8b
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_dynamic.go
@@ -0,0 +1,70 @@
+package jsoniter
+
+import (
+ "github.com/modern-go/reflect2"
+ "reflect"
+ "unsafe"
+)
+
+type dynamicEncoder struct {
+ valType reflect2.Type
+}
+
+func (encoder *dynamicEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ obj := encoder.valType.UnsafeIndirect(ptr)
+ stream.WriteVal(obj)
+}
+
+func (encoder *dynamicEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.valType.UnsafeIndirect(ptr) == nil
+}
+
+type efaceDecoder struct {
+}
+
+func (decoder *efaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ pObj := (*interface{})(ptr)
+ obj := *pObj
+ if obj == nil {
+ *pObj = iter.Read()
+ return
+ }
+ typ := reflect2.TypeOf(obj)
+ if typ.Kind() != reflect.Ptr {
+ *pObj = iter.Read()
+ return
+ }
+ ptrType := typ.(*reflect2.UnsafePtrType)
+ ptrElemType := ptrType.Elem()
+ if iter.WhatIsNext() == NilValue {
+ if ptrElemType.Kind() != reflect.Ptr {
+ iter.skipFourBytes('n', 'u', 'l', 'l')
+ *pObj = nil
+ return
+ }
+ }
+ if reflect2.IsNil(obj) {
+ obj := ptrElemType.New()
+ iter.ReadVal(obj)
+ *pObj = obj
+ return
+ }
+ iter.ReadVal(obj)
+}
+
+type ifaceDecoder struct {
+ valType *reflect2.UnsafeIFaceType
+}
+
+func (decoder *ifaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if iter.ReadNil() {
+ decoder.valType.UnsafeSet(ptr, decoder.valType.UnsafeNew())
+ return
+ }
+ obj := decoder.valType.UnsafeIndirect(ptr)
+ if reflect2.IsNil(obj) {
+ iter.ReportError("decode non empty interface", "can not unmarshal into nil")
+ return
+ }
+ iter.ReadVal(obj)
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go
new file mode 100644
index 0000000..80320cd
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_extension.go
@@ -0,0 +1,483 @@
+package jsoniter
+
+import (
+ "fmt"
+ "github.com/modern-go/reflect2"
+ "reflect"
+ "sort"
+ "strings"
+ "unicode"
+ "unsafe"
+)
+
+var typeDecoders = map[string]ValDecoder{}
+var fieldDecoders = map[string]ValDecoder{}
+var typeEncoders = map[string]ValEncoder{}
+var fieldEncoders = map[string]ValEncoder{}
+var extensions = []Extension{}
+
+// StructDescriptor describes how we should encode/decode the struct.
+type StructDescriptor struct {
+ Type reflect2.Type
+ Fields []*Binding
+}
+
+// GetField gets one field from the descriptor by its name.
+// A map is not used here, to preserve field order.
+func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding {
+ for _, binding := range structDescriptor.Fields {
+ if binding.Field.Name() == fieldName {
+ return binding
+ }
+ }
+ return nil
+}
+
+// Binding describes how we should encode/decode the struct field.
+type Binding struct {
+ levels []int
+ Field reflect2.StructField
+ FromNames []string
+ ToNames []string
+ Encoder ValEncoder
+ Decoder ValDecoder
+}
+
+// Extension is the catch-all SPI. Customize encoding/decoding by specifying alternate encoders/decoders.
+// Fields can also be renamed via UpdateStructDescriptor.
+type Extension interface {
+ UpdateStructDescriptor(structDescriptor *StructDescriptor)
+ CreateMapKeyDecoder(typ reflect2.Type) ValDecoder
+ CreateMapKeyEncoder(typ reflect2.Type) ValEncoder
+ CreateDecoder(typ reflect2.Type) ValDecoder
+ CreateEncoder(typ reflect2.Type) ValEncoder
+ DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder
+ DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder
+}
+
+// DummyExtension can be embedded to get dummy implementations for all methods of Extension.
+type DummyExtension struct {
+}
+
+// UpdateStructDescriptor No-op
+func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
+}
+
+// CreateMapKeyDecoder No-op
+func (extension *DummyExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
+ return nil
+}
+
+// CreateMapKeyEncoder No-op
+func (extension *DummyExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
+ return nil
+}
+
+// CreateDecoder No-op
+func (extension *DummyExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
+ return nil
+}
+
+// CreateEncoder No-op
+func (extension *DummyExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
+ return nil
+}
+
+// DecorateDecoder No-op
+func (extension *DummyExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
+ return decoder
+}
+
+// DecorateEncoder No-op
+func (extension *DummyExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
+ return encoder
+}
+
+// EncoderExtension is an Extension that resolves encoders from a type-keyed map.
+type EncoderExtension map[reflect2.Type]ValEncoder
+
+// UpdateStructDescriptor No-op
+func (extension EncoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
+}
+
+// CreateDecoder No-op
+func (extension EncoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
+ return nil
+}
+
+// CreateEncoder get encoder from map
+func (extension EncoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
+ return extension[typ]
+}
+
+// CreateMapKeyDecoder No-op
+func (extension EncoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
+ return nil
+}
+
+// CreateMapKeyEncoder No-op
+func (extension EncoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
+ return nil
+}
+
+// DecorateDecoder No-op
+func (extension EncoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
+ return decoder
+}
+
+// DecorateEncoder No-op
+func (extension EncoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
+ return encoder
+}
+
+// DecoderExtension is an Extension that resolves decoders from a type-keyed map.
+type DecoderExtension map[reflect2.Type]ValDecoder
+
+// UpdateStructDescriptor No-op
+func (extension DecoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
+}
+
+// CreateMapKeyDecoder No-op
+func (extension DecoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
+ return nil
+}
+
+// CreateMapKeyEncoder No-op
+func (extension DecoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
+ return nil
+}
+
+// CreateDecoder get decoder from map
+func (extension DecoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
+ return extension[typ]
+}
+
+// CreateEncoder No-op
+func (extension DecoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
+ return nil
+}
+
+// DecorateDecoder No-op
+func (extension DecoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
+ return decoder
+}
+
+// DecorateEncoder No-op
+func (extension DecoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
+ return encoder
+}
+
+type funcDecoder struct {
+ fun DecoderFunc
+}
+
+func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ decoder.fun(ptr, iter)
+}
+
+type funcEncoder struct {
+ fun EncoderFunc
+ isEmptyFunc func(ptr unsafe.Pointer) bool
+}
+
+func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ encoder.fun(ptr, stream)
+}
+
+func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ if encoder.isEmptyFunc == nil {
+ return false
+ }
+ return encoder.isEmptyFunc(ptr)
+}
+
+// DecoderFunc is the function form of a ValDecoder.
+type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator)
+
+// EncoderFunc is the function form of a ValEncoder.
+type EncoderFunc func(ptr unsafe.Pointer, stream *Stream)
+
+// RegisterTypeDecoderFunc registers a DecoderFunc for a type.
+func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) {
+ typeDecoders[typ] = &funcDecoder{fun}
+}
+
+// RegisterTypeDecoder registers a ValDecoder for a type.
+func RegisterTypeDecoder(typ string, decoder ValDecoder) {
+ typeDecoders[typ] = decoder
+}
+
+// RegisterFieldDecoderFunc registers a DecoderFunc for a struct field.
+func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) {
+ RegisterFieldDecoder(typ, field, &funcDecoder{fun})
+}
+
+// RegisterFieldDecoder registers a ValDecoder for a struct field.
+func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) {
+ fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder
+}
+
+// RegisterTypeEncoderFunc registers encode/isEmpty functions as the ValEncoder for a type.
+func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) {
+ typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc}
+}
+
+// RegisterTypeEncoder registers a ValEncoder for a type.
+func RegisterTypeEncoder(typ string, encoder ValEncoder) {
+ typeEncoders[typ] = encoder
+}
+
+// RegisterFieldEncoderFunc registers encode/isEmpty functions as the ValEncoder for a struct field.
+func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) {
+ RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc})
+}
+
+// RegisterFieldEncoder registers a ValEncoder for a struct field.
+func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) {
+ fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder
+}
+
+// RegisterExtension registers an extension.
+func RegisterExtension(extension Extension) {
+ extensions = append(extensions, extension)
+}
+
+func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder {
+ decoder := _getTypeDecoderFromExtension(ctx, typ)
+ if decoder != nil {
+ for _, extension := range extensions {
+ decoder = extension.DecorateDecoder(typ, decoder)
+ }
+ decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder)
+ for _, extension := range ctx.extraExtensions {
+ decoder = extension.DecorateDecoder(typ, decoder)
+ }
+ }
+ return decoder
+}
+func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder {
+ for _, extension := range extensions {
+ decoder := extension.CreateDecoder(typ)
+ if decoder != nil {
+ return decoder
+ }
+ }
+ decoder := ctx.decoderExtension.CreateDecoder(typ)
+ if decoder != nil {
+ return decoder
+ }
+ for _, extension := range ctx.extraExtensions {
+ decoder := extension.CreateDecoder(typ)
+ if decoder != nil {
+ return decoder
+ }
+ }
+ typeName := typ.String()
+ decoder = typeDecoders[typeName]
+ if decoder != nil {
+ return decoder
+ }
+ if typ.Kind() == reflect.Ptr {
+ ptrType := typ.(*reflect2.UnsafePtrType)
+ decoder := typeDecoders[ptrType.Elem().String()]
+ if decoder != nil {
+ return &OptionalDecoder{ptrType.Elem(), decoder}
+ }
+ }
+ return nil
+}
+
+func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder {
+ encoder := _getTypeEncoderFromExtension(ctx, typ)
+ if encoder != nil {
+ for _, extension := range extensions {
+ encoder = extension.DecorateEncoder(typ, encoder)
+ }
+ encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder)
+ for _, extension := range ctx.extraExtensions {
+ encoder = extension.DecorateEncoder(typ, encoder)
+ }
+ }
+ return encoder
+}
+
+func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder {
+ for _, extension := range extensions {
+ encoder := extension.CreateEncoder(typ)
+ if encoder != nil {
+ return encoder
+ }
+ }
+ encoder := ctx.encoderExtension.CreateEncoder(typ)
+ if encoder != nil {
+ return encoder
+ }
+ for _, extension := range ctx.extraExtensions {
+ encoder := extension.CreateEncoder(typ)
+ if encoder != nil {
+ return encoder
+ }
+ }
+ typeName := typ.String()
+ encoder = typeEncoders[typeName]
+ if encoder != nil {
+ return encoder
+ }
+ if typ.Kind() == reflect.Ptr {
+ typePtr := typ.(*reflect2.UnsafePtrType)
+ encoder := typeEncoders[typePtr.Elem().String()]
+ if encoder != nil {
+ return &OptionalEncoder{encoder}
+ }
+ }
+ return nil
+}
+
+func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor {
+ structType := typ.(*reflect2.UnsafeStructType)
+ embeddedBindings := []*Binding{}
+ bindings := []*Binding{}
+ for i := 0; i < structType.NumField(); i++ {
+ field := structType.Field(i)
+ tag, hastag := field.Tag().Lookup(ctx.getTagKey())
+ if ctx.onlyTaggedField && !hastag && !field.Anonymous() {
+ continue
+ }
+ if tag == "-" || field.Name() == "_" {
+ continue
+ }
+ tagParts := strings.Split(tag, ",")
+ if field.Anonymous() && (tag == "" || tagParts[0] == "") {
+ if field.Type().Kind() == reflect.Struct {
+ structDescriptor := describeStruct(ctx, field.Type())
+ for _, binding := range structDescriptor.Fields {
+ binding.levels = append([]int{i}, binding.levels...)
+ omitempty := binding.Encoder.(*structFieldEncoder).omitempty
+ binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty}
+ binding.Decoder = &structFieldDecoder{field, binding.Decoder}
+ embeddedBindings = append(embeddedBindings, binding)
+ }
+ continue
+ } else if field.Type().Kind() == reflect.Ptr {
+ ptrType := field.Type().(*reflect2.UnsafePtrType)
+ if ptrType.Elem().Kind() == reflect.Struct {
+ structDescriptor := describeStruct(ctx, ptrType.Elem())
+ for _, binding := range structDescriptor.Fields {
+ binding.levels = append([]int{i}, binding.levels...)
+ omitempty := binding.Encoder.(*structFieldEncoder).omitempty
+ binding.Encoder = &dereferenceEncoder{binding.Encoder}
+ binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty}
+ binding.Decoder = &dereferenceDecoder{ptrType.Elem(), binding.Decoder}
+ binding.Decoder = &structFieldDecoder{field, binding.Decoder}
+ embeddedBindings = append(embeddedBindings, binding)
+ }
+ continue
+ }
+ }
+ }
+ fieldNames := calcFieldNames(field.Name(), tagParts[0], tag)
+ fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name())
+ decoder := fieldDecoders[fieldCacheKey]
+ if decoder == nil {
+ decoder = decoderOfType(ctx.append(field.Name()), field.Type())
+ }
+ encoder := fieldEncoders[fieldCacheKey]
+ if encoder == nil {
+ encoder = encoderOfType(ctx.append(field.Name()), field.Type())
+ }
+ binding := &Binding{
+ Field: field,
+ FromNames: fieldNames,
+ ToNames: fieldNames,
+ Decoder: decoder,
+ Encoder: encoder,
+ }
+ binding.levels = []int{i}
+ bindings = append(bindings, binding)
+ }
+ return createStructDescriptor(ctx, typ, bindings, embeddedBindings)
+}
+func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor {
+ structDescriptor := &StructDescriptor{
+ Type: typ,
+ Fields: bindings,
+ }
+ for _, extension := range extensions {
+ extension.UpdateStructDescriptor(structDescriptor)
+ }
+ ctx.encoderExtension.UpdateStructDescriptor(structDescriptor)
+ ctx.decoderExtension.UpdateStructDescriptor(structDescriptor)
+ for _, extension := range ctx.extraExtensions {
+ extension.UpdateStructDescriptor(structDescriptor)
+ }
+ processTags(structDescriptor, ctx.frozenConfig)
+ // merge normal & embedded bindings & sort with original order
+ allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...))
+ sort.Sort(allBindings)
+ structDescriptor.Fields = allBindings
+ return structDescriptor
+}
+
+type sortableBindings []*Binding
+
+func (bindings sortableBindings) Len() int {
+ return len(bindings)
+}
+
+func (bindings sortableBindings) Less(i, j int) bool {
+ left := bindings[i].levels
+ right := bindings[j].levels
+ k := 0
+ for {
+ if left[k] < right[k] {
+ return true
+ } else if left[k] > right[k] {
+ return false
+ }
+ k++
+ }
+}
+
+func (bindings sortableBindings) Swap(i, j int) {
+ bindings[i], bindings[j] = bindings[j], bindings[i]
+}
+
+func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) {
+ for _, binding := range structDescriptor.Fields {
+ shouldOmitEmpty := false
+ tagParts := strings.Split(binding.Field.Tag().Get(cfg.getTagKey()), ",")
+ for _, tagPart := range tagParts[1:] {
+ if tagPart == "omitempty" {
+ shouldOmitEmpty = true
+ } else if tagPart == "string" {
+ if binding.Field.Type().Kind() == reflect.String {
+ binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg}
+ binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg}
+ } else {
+ binding.Decoder = &stringModeNumberDecoder{binding.Decoder}
+ binding.Encoder = &stringModeNumberEncoder{binding.Encoder}
+ }
+ }
+ }
+ binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder}
+ binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty}
+ }
+}
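+
+// A sketch of the ",string" tag option handled above (hypothetical type):
+// it installs the stringMode codecs, so numeric fields round-trip as JSON
+// strings:
+//
+//	type T struct {
+//		N int `json:"n,string"`
+//	}
+//	data, _ := ConfigCompatibleWithStandardLibrary.Marshal(T{N: 7})
+//	// data == []byte(`{"n":"7"}`)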
+
+func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string {
+ // ignore?
+ if wholeTag == "-" {
+ return []string{}
+ }
+ // rename?
+ var fieldNames []string
+ if tagProvidedFieldName == "" {
+ fieldNames = []string{originalFieldName}
+ } else {
+ fieldNames = []string{tagProvidedFieldName}
+ }
+ // private?
+ isNotExported := unicode.IsLower(rune(originalFieldName[0]))
+ if isNotExported {
+ fieldNames = []string{}
+ }
+ return fieldNames
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_json_number.go b/vendor/github.com/json-iterator/go/reflect_json_number.go
new file mode 100644
index 0000000..98d45c1
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_json_number.go
@@ -0,0 +1,112 @@
+package jsoniter
+
+import (
+ "encoding/json"
+ "github.com/modern-go/reflect2"
+ "strconv"
+ "unsafe"
+)
+
+// Number is a string holding a JSON number literal, like json.Number.
+type Number string
+
+// String returns the literal text of the number.
+func (n Number) String() string { return string(n) }
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+ return strconv.ParseFloat(string(n), 64)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+ return strconv.ParseInt(string(n), 10, 64)
+}
+
+// CastJsonNumber returns the literal text of val if it is a json.Number or a jsoniter Number.
+func CastJsonNumber(val interface{}) (string, bool) {
+ switch typedVal := val.(type) {
+ case json.Number:
+ return string(typedVal), true
+ case Number:
+ return string(typedVal), true
+ }
+ return "", false
+}
+
+var jsonNumberType = reflect2.TypeOfPtr((*json.Number)(nil)).Elem()
+var jsoniterNumberType = reflect2.TypeOfPtr((*Number)(nil)).Elem()
+
+func createDecoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValDecoder {
+ if typ.AssignableTo(jsonNumberType) {
+ return &jsonNumberCodec{}
+ }
+ if typ.AssignableTo(jsoniterNumberType) {
+ return &jsoniterNumberCodec{}
+ }
+ return nil
+}
+
+func createEncoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValEncoder {
+ if typ.AssignableTo(jsonNumberType) {
+ return &jsonNumberCodec{}
+ }
+ if typ.AssignableTo(jsoniterNumberType) {
+ return &jsoniterNumberCodec{}
+ }
+ return nil
+}
+
+type jsonNumberCodec struct {
+}
+
+func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ switch iter.WhatIsNext() {
+ case StringValue:
+ *((*json.Number)(ptr)) = json.Number(iter.ReadString())
+ case NilValue:
+ iter.skipFourBytes('n', 'u', 'l', 'l')
+ *((*json.Number)(ptr)) = ""
+ default:
+ *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString()))
+ }
+}
+
+func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ number := *((*json.Number)(ptr))
+ if len(number) == 0 {
+ stream.writeByte('0')
+ } else {
+ stream.WriteRaw(string(number))
+ }
+}
+
+func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ return len(*((*json.Number)(ptr))) == 0
+}
+
+type jsoniterNumberCodec struct {
+}
+
+func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ switch iter.WhatIsNext() {
+ case StringValue:
+ *((*Number)(ptr)) = Number(iter.ReadString())
+ case NilValue:
+ iter.skipFourBytes('n', 'u', 'l', 'l')
+ *((*Number)(ptr)) = ""
+ default:
+ *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString()))
+ }
+}
+
+func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ number := *((*Number)(ptr))
+ if len(number) == 0 {
+ stream.writeByte('0')
+ } else {
+ stream.WriteRaw(string(number))
+ }
+}
+
+func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ return len(*((*Number)(ptr))) == 0
+}
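As a usage sketch, the codecs above accept a number in either quoted or raw form and keep its literal text, so precision is never lost to a float round-trip (field name illustrative):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	var payload struct {
		N jsoniter.Number `json:"n"`
	}
	// jsoniterNumberCodec accepts both `"3.14"` and `3.14`.
	if err := jsoniter.UnmarshalFromString(`{"n": "3.14"}`, &payload); err != nil {
		panic(err)
	}
	f, _ := payload.N.Float64()
	fmt.Println(payload.N.String(), f) // 3.14 3.14
}
```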
diff --git a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go
new file mode 100644
index 0000000..f261993
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go
@@ -0,0 +1,60 @@
+package jsoniter
+
+import (
+ "encoding/json"
+ "github.com/modern-go/reflect2"
+ "unsafe"
+)
+
+var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()
+var jsoniterRawMessageType = reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()
+
+func createEncoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValEncoder {
+ if typ == jsonRawMessageType {
+ return &jsonRawMessageCodec{}
+ }
+ if typ == jsoniterRawMessageType {
+ return &jsoniterRawMessageCodec{}
+ }
+ return nil
+}
+
+func createDecoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValDecoder {
+ if typ == jsonRawMessageType {
+ return &jsonRawMessageCodec{}
+ }
+ if typ == jsoniterRawMessageType {
+ return &jsoniterRawMessageCodec{}
+ }
+ return nil
+}
+
+type jsonRawMessageCodec struct {
+}
+
+func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes())
+}
+
+func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteRaw(string(*((*json.RawMessage)(ptr))))
+}
+
+func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ return len(*((*json.RawMessage)(ptr))) == 0
+}
+
+type jsoniterRawMessageCodec struct {
+}
+
+func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes())
+}
+
+func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteRaw(string(*((*RawMessage)(ptr))))
+}
+
+func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ return len(*((*RawMessage)(ptr))) == 0
+}
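A short round-trip sketch: the raw-message codecs skip over the value and store its exact bytes, then write them back verbatim on encode (struct shape illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	var envelope struct {
		Kind string          `json:"kind"`
		Body json.RawMessage `json:"body"`
	}
	input := `{"kind":"point","body":{"x":1,"y":2}}`
	if err := jsoniter.UnmarshalFromString(input, &envelope); err != nil {
		panic(err)
	}
	fmt.Println(string(envelope.Body)) // {"x":1,"y":2}

	out, _ := jsoniter.MarshalToString(envelope) // bytes written back verbatim
	fmt.Println(out)
}
```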
diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go
new file mode 100644
index 0000000..9e2b623
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_map.go
@@ -0,0 +1,346 @@
+package jsoniter
+
+import (
+ "fmt"
+ "github.com/modern-go/reflect2"
+ "io"
+ "reflect"
+ "sort"
+ "unsafe"
+)
+
+func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder {
+ mapType := typ.(*reflect2.UnsafeMapType)
+ keyDecoder := decoderOfMapKey(ctx.append("[mapKey]"), mapType.Key())
+ elemDecoder := decoderOfType(ctx.append("[mapElem]"), mapType.Elem())
+ return &mapDecoder{
+ mapType: mapType,
+ keyType: mapType.Key(),
+ elemType: mapType.Elem(),
+ keyDecoder: keyDecoder,
+ elemDecoder: elemDecoder,
+ }
+}
+
+func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder {
+ mapType := typ.(*reflect2.UnsafeMapType)
+ if ctx.sortMapKeys {
+ return &sortKeysMapEncoder{
+ mapType: mapType,
+ keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()),
+ elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()),
+ }
+ }
+ return &mapEncoder{
+ mapType: mapType,
+ keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()),
+ elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()),
+ }
+}
+
+func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder {
+ decoder := ctx.decoderExtension.CreateMapKeyDecoder(typ)
+ if decoder != nil {
+ return decoder
+ }
+ for _, extension := range ctx.extraExtensions {
+ decoder := extension.CreateMapKeyDecoder(typ)
+ if decoder != nil {
+ return decoder
+ }
+ }
+ switch typ.Kind() {
+ case reflect.String:
+ return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
+ case reflect.Bool,
+ reflect.Uint8, reflect.Int8,
+ reflect.Uint16, reflect.Int16,
+ reflect.Uint32, reflect.Int32,
+ reflect.Uint64, reflect.Int64,
+ reflect.Uint, reflect.Int,
+ reflect.Float32, reflect.Float64,
+ reflect.Uintptr:
+ typ = reflect2.DefaultTypeOfKind(typ.Kind())
+ return &numericMapKeyDecoder{decoderOfType(ctx, typ)}
+ default:
+ ptrType := reflect2.PtrTo(typ)
+ if ptrType.Implements(unmarshalerType) {
+ return &referenceDecoder{
+ &unmarshalerDecoder{
+ valType: ptrType,
+ },
+ }
+ }
+ if typ.Implements(unmarshalerType) {
+ return &unmarshalerDecoder{
+ valType: typ,
+ }
+ }
+ if ptrType.Implements(textUnmarshalerType) {
+ return &referenceDecoder{
+ &textUnmarshalerDecoder{
+ valType: ptrType,
+ },
+ }
+ }
+ if typ.Implements(textUnmarshalerType) {
+ return &textUnmarshalerDecoder{
+ valType: typ,
+ }
+ }
+ return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)}
+ }
+}
+
+func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder {
+ encoder := ctx.encoderExtension.CreateMapKeyEncoder(typ)
+ if encoder != nil {
+ return encoder
+ }
+ for _, extension := range ctx.extraExtensions {
+ encoder := extension.CreateMapKeyEncoder(typ)
+ if encoder != nil {
+ return encoder
+ }
+ }
+ switch typ.Kind() {
+ case reflect.String:
+ return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
+ case reflect.Bool,
+ reflect.Uint8, reflect.Int8,
+ reflect.Uint16, reflect.Int16,
+ reflect.Uint32, reflect.Int32,
+ reflect.Uint64, reflect.Int64,
+ reflect.Uint, reflect.Int,
+ reflect.Float32, reflect.Float64,
+ reflect.Uintptr:
+ typ = reflect2.DefaultTypeOfKind(typ.Kind())
+ return &numericMapKeyEncoder{encoderOfType(ctx, typ)}
+ default:
+ if typ == textMarshalerType {
+ return &directTextMarshalerEncoder{
+ stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+ }
+ }
+ if typ.Implements(textMarshalerType) {
+ return &textMarshalerEncoder{
+ valType: typ,
+ stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+ }
+ }
+ if typ.Kind() == reflect.Interface {
+ return &dynamicMapKeyEncoder{ctx, typ}
+ }
+ return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)}
+ }
+}
+
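In practice the branches of `encoderOfMapKey` mean: numeric keys are wrapped in quotes, comparable `encoding.TextMarshaler` keys use their text form, and any other key kind fails lazily, only when a value of that type is actually encoded. A sketch under those assumptions (the `point` type is illustrative):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// point is comparable and has a text form, so it can serve as a map key
// through the textMarshalerEncoder branch.
type point struct{ X, Y int }

func (p point) MarshalText() ([]byte, error) {
	return []byte(fmt.Sprintf("%d,%d", p.X, p.Y)), nil
}

func main() {
	out, _ := jsoniter.MarshalToString(map[int]string{1: "one"})
	fmt.Println(out) // {"1":"one"} (numericMapKeyEncoder adds the quotes)

	out, _ = jsoniter.MarshalToString(map[point]string{{1, 2}: "here"})
	fmt.Println(out) // {"1,2":"here"}

	// Unsupported key kinds surface through lazyErrorEncoder at encode time.
	_, err := jsoniter.Marshal(map[[2]int]string{{3, 4}: "pair"})
	fmt.Println(err != nil) // true
}
```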
+type mapDecoder struct {
+ mapType *reflect2.UnsafeMapType
+ keyType reflect2.Type
+ elemType reflect2.Type
+ keyDecoder ValDecoder
+ elemDecoder ValDecoder
+}
+
+func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ mapType := decoder.mapType
+ c := iter.nextToken()
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ *(*unsafe.Pointer)(ptr) = nil
+ mapType.UnsafeSet(ptr, mapType.UnsafeNew())
+ return
+ }
+ if mapType.UnsafeIsNil(ptr) {
+ mapType.UnsafeSet(ptr, mapType.UnsafeMakeMap(0))
+ }
+ if c != '{' {
+ iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c}))
+ return
+ }
+ c = iter.nextToken()
+ if c == '}' {
+ return
+ }
+ if c != '"' {
+ iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c}))
+ return
+ }
+ iter.unreadByte()
+ key := decoder.keyType.UnsafeNew()
+ decoder.keyDecoder.Decode(key, iter)
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
+ return
+ }
+ elem := decoder.elemType.UnsafeNew()
+ decoder.elemDecoder.Decode(elem, iter)
+ decoder.mapType.UnsafeSetIndex(ptr, key, elem)
+ for c = iter.nextToken(); c == ','; c = iter.nextToken() {
+ key := decoder.keyType.UnsafeNew()
+ decoder.keyDecoder.Decode(key, iter)
+ c = iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
+ return
+ }
+ elem := decoder.elemType.UnsafeNew()
+ decoder.elemDecoder.Decode(elem, iter)
+ decoder.mapType.UnsafeSetIndex(ptr, key, elem)
+ }
+ if c != '}' {
+ iter.ReportError("ReadMapCB", `expect }, but found `+string([]byte{c}))
+ }
+}
+
+type numericMapKeyDecoder struct {
+ decoder ValDecoder
+}
+
+func (decoder *numericMapKeyDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ c := iter.nextToken()
+ if c != '"' {
+ iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c}))
+ return
+ }
+ decoder.decoder.Decode(ptr, iter)
+ c = iter.nextToken()
+ if c != '"' {
+ iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c}))
+ return
+ }
+}
+
+type numericMapKeyEncoder struct {
+ encoder ValEncoder
+}
+
+func (encoder *numericMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.writeByte('"')
+ encoder.encoder.Encode(ptr, stream)
+ stream.writeByte('"')
+}
+
+func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return false
+}
+
+type dynamicMapKeyEncoder struct {
+ ctx *ctx
+ valType reflect2.Type
+}
+
+func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ obj := encoder.valType.UnsafeIndirect(ptr)
+ encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream)
+}
+
+func (encoder *dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ obj := encoder.valType.UnsafeIndirect(ptr)
+ return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj))
+}
+
+type mapEncoder struct {
+ mapType *reflect2.UnsafeMapType
+ keyEncoder ValEncoder
+ elemEncoder ValEncoder
+}
+
+func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if *(*unsafe.Pointer)(ptr) == nil {
+ stream.WriteNil()
+ return
+ }
+ stream.WriteObjectStart()
+ iter := encoder.mapType.UnsafeIterate(ptr)
+ for i := 0; iter.HasNext(); i++ {
+ if i != 0 {
+ stream.WriteMore()
+ }
+ key, elem := iter.UnsafeNext()
+ encoder.keyEncoder.Encode(key, stream)
+ if stream.indention > 0 {
+ stream.writeTwoBytes(byte(':'), byte(' '))
+ } else {
+ stream.writeByte(':')
+ }
+ encoder.elemEncoder.Encode(elem, stream)
+ }
+ stream.WriteObjectEnd()
+}
+
+func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ iter := encoder.mapType.UnsafeIterate(ptr)
+ return !iter.HasNext()
+}
+
+type sortKeysMapEncoder struct {
+ mapType *reflect2.UnsafeMapType
+ keyEncoder ValEncoder
+ elemEncoder ValEncoder
+}
+
+func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if *(*unsafe.Pointer)(ptr) == nil {
+ stream.WriteNil()
+ return
+ }
+ stream.WriteObjectStart()
+ mapIter := encoder.mapType.UnsafeIterate(ptr)
+ subStream := stream.cfg.BorrowStream(nil)
+ subStream.Attachment = stream.Attachment
+ subIter := stream.cfg.BorrowIterator(nil)
+ keyValues := encodedKeyValues{}
+ for mapIter.HasNext() {
+ key, elem := mapIter.UnsafeNext()
+ subStreamIndex := subStream.Buffered()
+ encoder.keyEncoder.Encode(key, subStream)
+ if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil {
+ stream.Error = subStream.Error
+ }
+ encodedKey := subStream.Buffer()[subStreamIndex:]
+ subIter.ResetBytes(encodedKey)
+ decodedKey := subIter.ReadString()
+ if stream.indention > 0 {
+ subStream.writeTwoBytes(byte(':'), byte(' '))
+ } else {
+ subStream.writeByte(':')
+ }
+ encoder.elemEncoder.Encode(elem, subStream)
+ keyValues = append(keyValues, encodedKV{
+ key: decodedKey,
+ keyValue: subStream.Buffer()[subStreamIndex:],
+ })
+ }
+ sort.Sort(keyValues)
+ for i, keyValue := range keyValues {
+ if i != 0 {
+ stream.WriteMore()
+ }
+ stream.Write(keyValue.keyValue)
+ }
+ if subStream.Error != nil && stream.Error == nil {
+ stream.Error = subStream.Error
+ }
+ stream.WriteObjectEnd()
+ stream.cfg.ReturnStream(subStream)
+ stream.cfg.ReturnIterator(subIter)
+}
+
+func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ iter := encoder.mapType.UnsafeIterate(ptr)
+ return !iter.HasNext()
+}
+
+type encodedKeyValues []encodedKV
+
+type encodedKV struct {
+ key string
+ keyValue []byte
+}
+
+func (sv encodedKeyValues) Len() int { return len(sv) }
+func (sv encodedKeyValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
+func (sv encodedKeyValues) Less(i, j int) bool { return sv[i].key < sv[j].key }
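The choice between `mapEncoder` and `sortKeysMapEncoder` is driven by the public `Config`. A brief sketch contrasting the two (map contents illustrative):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	m := map[string]int{"b": 2, "a": 1, "c": 3}

	// The default mapEncoder follows Go's randomized map iteration order.
	fast := jsoniter.Config{}.Froze()
	out, _ := fast.MarshalToString(m)
	fmt.Println(out) // key order varies between runs

	// SortMapKeys selects sortKeysMapEncoder, matching encoding/json.
	sorted := jsoniter.Config{SortMapKeys: true}.Froze()
	out, _ = sorted.MarshalToString(m)
	fmt.Println(out) // {"a":1,"b":2,"c":3}
}
```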
diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go
new file mode 100644
index 0000000..3e21f37
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_marshaler.go
@@ -0,0 +1,225 @@
+package jsoniter
+
+import (
+ "encoding"
+ "encoding/json"
+ "unsafe"
+
+ "github.com/modern-go/reflect2"
+)
+
+var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem()
+var unmarshalerType = reflect2.TypeOfPtr((*json.Unmarshaler)(nil)).Elem()
+var textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem()
+var textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem()
+
+func createDecoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValDecoder {
+ ptrType := reflect2.PtrTo(typ)
+ if ptrType.Implements(unmarshalerType) {
+ return &referenceDecoder{
+ &unmarshalerDecoder{ptrType},
+ }
+ }
+ if ptrType.Implements(textUnmarshalerType) {
+ return &referenceDecoder{
+ &textUnmarshalerDecoder{ptrType},
+ }
+ }
+ return nil
+}
+
+func createEncoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValEncoder {
+ if typ == marshalerType {
+ checkIsEmpty := createCheckIsEmpty(ctx, typ)
+ var encoder ValEncoder = &directMarshalerEncoder{
+ checkIsEmpty: checkIsEmpty,
+ }
+ return encoder
+ }
+ if typ.Implements(marshalerType) {
+ checkIsEmpty := createCheckIsEmpty(ctx, typ)
+ var encoder ValEncoder = &marshalerEncoder{
+ valType: typ,
+ checkIsEmpty: checkIsEmpty,
+ }
+ return encoder
+ }
+ ptrType := reflect2.PtrTo(typ)
+ if ctx.prefix != "" && ptrType.Implements(marshalerType) {
+ checkIsEmpty := createCheckIsEmpty(ctx, ptrType)
+ var encoder ValEncoder = &marshalerEncoder{
+ valType: ptrType,
+ checkIsEmpty: checkIsEmpty,
+ }
+ return &referenceEncoder{encoder}
+ }
+ if typ == textMarshalerType {
+ checkIsEmpty := createCheckIsEmpty(ctx, typ)
+ var encoder ValEncoder = &directTextMarshalerEncoder{
+ checkIsEmpty: checkIsEmpty,
+ stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+ }
+ return encoder
+ }
+ if typ.Implements(textMarshalerType) {
+ checkIsEmpty := createCheckIsEmpty(ctx, typ)
+ var encoder ValEncoder = &textMarshalerEncoder{
+ valType: typ,
+ stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+ checkIsEmpty: checkIsEmpty,
+ }
+ return encoder
+ }
+ // if prefix is empty, the type is the root type
+ if ctx.prefix != "" && ptrType.Implements(textMarshalerType) {
+ checkIsEmpty := createCheckIsEmpty(ctx, ptrType)
+ var encoder ValEncoder = &textMarshalerEncoder{
+ valType: ptrType,
+ stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+ checkIsEmpty: checkIsEmpty,
+ }
+ return &referenceEncoder{encoder}
+ }
+ return nil
+}
+
+type marshalerEncoder struct {
+ checkIsEmpty checkIsEmpty
+ valType reflect2.Type
+}
+
+func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ obj := encoder.valType.UnsafeIndirect(ptr)
+ if encoder.valType.IsNullable() && reflect2.IsNil(obj) {
+ stream.WriteNil()
+ return
+ }
+ marshaler := obj.(json.Marshaler)
+ bytes, err := marshaler.MarshalJSON()
+ if err != nil {
+ stream.Error = err
+ } else {
+ // html escape was already done by jsoniter,
+ // but the extra trailing '\n' should be trimmed
+ l := len(bytes)
+ if l > 0 && bytes[l-1] == '\n' {
+ bytes = bytes[:l-1]
+ }
+ stream.Write(bytes)
+ }
+}
+
+func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.checkIsEmpty.IsEmpty(ptr)
+}
+
+type directMarshalerEncoder struct {
+ checkIsEmpty checkIsEmpty
+}
+
+func (encoder *directMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ marshaler := *(*json.Marshaler)(ptr)
+ if marshaler == nil {
+ stream.WriteNil()
+ return
+ }
+ bytes, err := marshaler.MarshalJSON()
+ if err != nil {
+ stream.Error = err
+ } else {
+ stream.Write(bytes)
+ }
+}
+
+func (encoder *directMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.checkIsEmpty.IsEmpty(ptr)
+}
+
+type textMarshalerEncoder struct {
+ valType reflect2.Type
+ stringEncoder ValEncoder
+ checkIsEmpty checkIsEmpty
+}
+
+func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ obj := encoder.valType.UnsafeIndirect(ptr)
+ if encoder.valType.IsNullable() && reflect2.IsNil(obj) {
+ stream.WriteNil()
+ return
+ }
+ marshaler := (obj).(encoding.TextMarshaler)
+ bytes, err := marshaler.MarshalText()
+ if err != nil {
+ stream.Error = err
+ } else {
+ str := string(bytes)
+ encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream)
+ }
+}
+
+func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.checkIsEmpty.IsEmpty(ptr)
+}
+
+type directTextMarshalerEncoder struct {
+ stringEncoder ValEncoder
+ checkIsEmpty checkIsEmpty
+}
+
+func (encoder *directTextMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ marshaler := *(*encoding.TextMarshaler)(ptr)
+ if marshaler == nil {
+ stream.WriteNil()
+ return
+ }
+ bytes, err := marshaler.MarshalText()
+ if err != nil {
+ stream.Error = err
+ } else {
+ str := string(bytes)
+ encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream)
+ }
+}
+
+func (encoder *directTextMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.checkIsEmpty.IsEmpty(ptr)
+}
+
+type unmarshalerDecoder struct {
+ valType reflect2.Type
+}
+
+func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ valType := decoder.valType
+ obj := valType.UnsafeIndirect(ptr)
+ unmarshaler := obj.(json.Unmarshaler)
+ iter.nextToken()
+ iter.unreadByte() // peek and unread: skips any leading whitespace
+ bytes := iter.SkipAndReturnBytes()
+ err := unmarshaler.UnmarshalJSON(bytes)
+ if err != nil {
+ iter.ReportError("unmarshalerDecoder", err.Error())
+ }
+}
+
+type textUnmarshalerDecoder struct {
+ valType reflect2.Type
+}
+
+func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ valType := decoder.valType
+ obj := valType.UnsafeIndirect(ptr)
+ if reflect2.IsNil(obj) {
+ ptrType := valType.(*reflect2.UnsafePtrType)
+ elemType := ptrType.Elem()
+ elem := elemType.UnsafeNew()
+ ptrType.UnsafeSet(ptr, unsafe.Pointer(&elem))
+ obj = valType.UnsafeIndirect(ptr)
+ }
+ unmarshaler := (obj).(encoding.TextUnmarshaler)
+ str := iter.ReadString()
+ err := unmarshaler.UnmarshalText([]byte(str))
+ if err != nil {
+ iter.ReportError("textUnmarshalerDecoder", err.Error())
+ }
+}
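A round-trip sketch of the marshaler and unmarshaler paths above; the `celsius` type and its text format are illustrative:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// celsius customizes its JSON form, so jsoniter routes it through
// marshalerEncoder on encode and unmarshalerDecoder on decode.
type celsius float64

func (c celsius) MarshalJSON() ([]byte, error) {
	return []byte(fmt.Sprintf(`"%g C"`, float64(c))), nil
}

func (c *celsius) UnmarshalJSON(data []byte) error {
	_, err := fmt.Sscanf(string(data), `"%g C"`, (*float64)(c))
	return err
}

func main() {
	out, _ := jsoniter.MarshalToString(celsius(21.5))
	fmt.Println(out) // "21.5 C"

	var c celsius
	if err := jsoniter.UnmarshalFromString(out, &c); err != nil {
		panic(err)
	}
	fmt.Println(float64(c)) // 21.5
}
```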
diff --git a/vendor/github.com/json-iterator/go/reflect_native.go b/vendor/github.com/json-iterator/go/reflect_native.go
new file mode 100644
index 0000000..f88722d
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_native.go
@@ -0,0 +1,453 @@
+package jsoniter
+
+import (
+ "encoding/base64"
+ "reflect"
+ "strconv"
+ "unsafe"
+
+ "github.com/modern-go/reflect2"
+)
+
+const ptrSize = 32 << uintptr(^uintptr(0)>>63)
+
+func createEncoderOfNative(ctx *ctx, typ reflect2.Type) ValEncoder {
+ if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 {
+ sliceDecoder := decoderOfSlice(ctx, typ)
+ return &base64Codec{sliceDecoder: sliceDecoder}
+ }
+ typeName := typ.String()
+ kind := typ.Kind()
+ switch kind {
+ case reflect.String:
+ if typeName != "string" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem())
+ }
+ return &stringCodec{}
+ case reflect.Int:
+ if typeName != "int" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem())
+ }
+ if strconv.IntSize == 32 {
+ return &int32Codec{}
+ }
+ return &int64Codec{}
+ case reflect.Int8:
+ if typeName != "int8" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem())
+ }
+ return &int8Codec{}
+ case reflect.Int16:
+ if typeName != "int16" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem())
+ }
+ return &int16Codec{}
+ case reflect.Int32:
+ if typeName != "int32" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem())
+ }
+ return &int32Codec{}
+ case reflect.Int64:
+ if typeName != "int64" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem())
+ }
+ return &int64Codec{}
+ case reflect.Uint:
+ if typeName != "uint" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem())
+ }
+ if strconv.IntSize == 32 {
+ return &uint32Codec{}
+ }
+ return &uint64Codec{}
+ case reflect.Uint8:
+ if typeName != "uint8" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem())
+ }
+ return &uint8Codec{}
+ case reflect.Uint16:
+ if typeName != "uint16" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem())
+ }
+ return &uint16Codec{}
+ case reflect.Uint32:
+ if typeName != "uint32" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem())
+ }
+ return &uint32Codec{}
+ case reflect.Uintptr:
+ if typeName != "uintptr" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem())
+ }
+ if ptrSize == 32 {
+ return &uint32Codec{}
+ }
+ return &uint64Codec{}
+ case reflect.Uint64:
+ if typeName != "uint64" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem())
+ }
+ return &uint64Codec{}
+ case reflect.Float32:
+ if typeName != "float32" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem())
+ }
+ return &float32Codec{}
+ case reflect.Float64:
+ if typeName != "float64" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem())
+ }
+ return &float64Codec{}
+ case reflect.Bool:
+ if typeName != "bool" {
+ return encoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem())
+ }
+ return &boolCodec{}
+ }
+ return nil
+}
+
+func createDecoderOfNative(ctx *ctx, typ reflect2.Type) ValDecoder {
+ if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 {
+ sliceDecoder := decoderOfSlice(ctx, typ)
+ return &base64Codec{sliceDecoder: sliceDecoder}
+ }
+ typeName := typ.String()
+ switch typ.Kind() {
+ case reflect.String:
+ if typeName != "string" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem())
+ }
+ return &stringCodec{}
+ case reflect.Int:
+ if typeName != "int" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem())
+ }
+ if strconv.IntSize == 32 {
+ return &int32Codec{}
+ }
+ return &int64Codec{}
+ case reflect.Int8:
+ if typeName != "int8" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem())
+ }
+ return &int8Codec{}
+ case reflect.Int16:
+ if typeName != "int16" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem())
+ }
+ return &int16Codec{}
+ case reflect.Int32:
+ if typeName != "int32" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem())
+ }
+ return &int32Codec{}
+ case reflect.Int64:
+ if typeName != "int64" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem())
+ }
+ return &int64Codec{}
+ case reflect.Uint:
+ if typeName != "uint" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem())
+ }
+ if strconv.IntSize == 32 {
+ return &uint32Codec{}
+ }
+ return &uint64Codec{}
+ case reflect.Uint8:
+ if typeName != "uint8" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem())
+ }
+ return &uint8Codec{}
+ case reflect.Uint16:
+ if typeName != "uint16" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem())
+ }
+ return &uint16Codec{}
+ case reflect.Uint32:
+ if typeName != "uint32" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem())
+ }
+ return &uint32Codec{}
+ case reflect.Uintptr:
+ if typeName != "uintptr" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem())
+ }
+ if ptrSize == 32 {
+ return &uint32Codec{}
+ }
+ return &uint64Codec{}
+ case reflect.Uint64:
+ if typeName != "uint64" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem())
+ }
+ return &uint64Codec{}
+ case reflect.Float32:
+ if typeName != "float32" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem())
+ }
+ return &float32Codec{}
+ case reflect.Float64:
+ if typeName != "float64" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem())
+ }
+ return &float64Codec{}
+ case reflect.Bool:
+ if typeName != "bool" {
+ return decoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem())
+ }
+ return &boolCodec{}
+ }
+ return nil
+}
+
+type stringCodec struct {
+}
+
+func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ *((*string)(ptr)) = iter.ReadString()
+}
+
+func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ str := *((*string)(ptr))
+ stream.WriteString(str)
+}
+
+func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*string)(ptr)) == ""
+}
+
+type int8Codec struct {
+}
+
+func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*int8)(ptr)) = iter.ReadInt8()
+ }
+}
+
+func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteInt8(*((*int8)(ptr)))
+}
+
+func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*int8)(ptr)) == 0
+}
+
+type int16Codec struct {
+}
+
+func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*int16)(ptr)) = iter.ReadInt16()
+ }
+}
+
+func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteInt16(*((*int16)(ptr)))
+}
+
+func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*int16)(ptr)) == 0
+}
+
+type int32Codec struct {
+}
+
+func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*int32)(ptr)) = iter.ReadInt32()
+ }
+}
+
+func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteInt32(*((*int32)(ptr)))
+}
+
+func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*int32)(ptr)) == 0
+}
+
+type int64Codec struct {
+}
+
+func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*int64)(ptr)) = iter.ReadInt64()
+ }
+}
+
+func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteInt64(*((*int64)(ptr)))
+}
+
+func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*int64)(ptr)) == 0
+}
+
+type uint8Codec struct {
+}
+
+func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*uint8)(ptr)) = iter.ReadUint8()
+ }
+}
+
+func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteUint8(*((*uint8)(ptr)))
+}
+
+func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*uint8)(ptr)) == 0
+}
+
+type uint16Codec struct {
+}
+
+func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*uint16)(ptr)) = iter.ReadUint16()
+ }
+}
+
+func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteUint16(*((*uint16)(ptr)))
+}
+
+func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*uint16)(ptr)) == 0
+}
+
+type uint32Codec struct {
+}
+
+func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*uint32)(ptr)) = iter.ReadUint32()
+ }
+}
+
+func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteUint32(*((*uint32)(ptr)))
+}
+
+func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*uint32)(ptr)) == 0
+}
+
+type uint64Codec struct {
+}
+
+func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*uint64)(ptr)) = iter.ReadUint64()
+ }
+}
+
+func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteUint64(*((*uint64)(ptr)))
+}
+
+func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*uint64)(ptr)) == 0
+}
+
+type float32Codec struct {
+}
+
+func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*float32)(ptr)) = iter.ReadFloat32()
+ }
+}
+
+func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteFloat32(*((*float32)(ptr)))
+}
+
+func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*float32)(ptr)) == 0
+}
+
+type float64Codec struct {
+}
+
+func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*float64)(ptr)) = iter.ReadFloat64()
+ }
+}
+
+func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteFloat64(*((*float64)(ptr)))
+}
+
+func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*float64)(ptr)) == 0
+}
+
+type boolCodec struct {
+}
+
+func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.ReadNil() {
+ *((*bool)(ptr)) = iter.ReadBool()
+ }
+}
+
+func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteBool(*((*bool)(ptr)))
+}
+
+func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool {
+ return !(*((*bool)(ptr)))
+}
+
+type base64Codec struct {
+ sliceType *reflect2.UnsafeSliceType
+ sliceDecoder ValDecoder
+}
+
+func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if iter.ReadNil() {
+ codec.sliceType.UnsafeSetNil(ptr)
+ return
+ }
+ switch iter.WhatIsNext() {
+ case StringValue:
+ src := iter.ReadString()
+ dst, err := base64.StdEncoding.DecodeString(src)
+ if err != nil {
+ iter.ReportError("decode base64", err.Error())
+ } else {
+ codec.sliceType.UnsafeSet(ptr, unsafe.Pointer(&dst))
+ }
+ case ArrayValue:
+ codec.sliceDecoder.Decode(ptr, iter)
+ default:
+ iter.ReportError("base64Codec", "invalid input")
+ }
+}
+
+func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if codec.sliceType.UnsafeIsNil(ptr) {
+ stream.WriteNil()
+ return
+ }
+ src := *((*[]byte)(ptr))
+ encoding := base64.StdEncoding
+ stream.writeByte('"')
+ if len(src) != 0 {
+ size := encoding.EncodedLen(len(src))
+ buf := make([]byte, size)
+ encoding.Encode(buf, src)
+ stream.buf = append(stream.buf, buf...)
+ }
+ stream.writeByte('"')
+}
+
+func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool {
+ return len(*((*[]byte)(ptr))) == 0
+}
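A usage sketch of `base64Codec`: `[]byte` values encode as a base64 string, while decoding accepts either the string form or, through `sliceDecoder`, the plain array form (values illustrative):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	out, _ := jsoniter.MarshalToString(map[string][]byte{"data": []byte("gin")})
	fmt.Println(out) // {"data":"Z2lu"}

	var v struct {
		Data []byte `json:"data"`
	}
	_ = jsoniter.UnmarshalFromString(`{"data":"Z2lu"}`, &v)
	fmt.Println(string(v.Data)) // gin

	// The ArrayValue branch falls back to the ordinary slice decoder.
	_ = jsoniter.UnmarshalFromString(`{"data":[103,105,110]}`, &v)
	fmt.Println(string(v.Data)) // gin
}
```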
diff --git a/vendor/github.com/json-iterator/go/reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go
new file mode 100644
index 0000000..43ec71d
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_optional.go
@@ -0,0 +1,133 @@
+package jsoniter
+
+import (
+ "github.com/modern-go/reflect2"
+ "reflect"
+ "unsafe"
+)
+
+func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder {
+ ptrType := typ.(*reflect2.UnsafePtrType)
+ elemType := ptrType.Elem()
+ decoder := decoderOfType(ctx, elemType)
+ if ctx.prefix == "" && elemType.Kind() == reflect.Ptr {
+ return &dereferenceDecoder{elemType, decoder}
+ }
+ return &OptionalDecoder{elemType, decoder}
+}
+
+func encoderOfOptional(ctx *ctx, typ reflect2.Type) ValEncoder {
+ ptrType := typ.(*reflect2.UnsafePtrType)
+ elemType := ptrType.Elem()
+ elemEncoder := encoderOfType(ctx, elemType)
+ encoder := &OptionalEncoder{elemEncoder}
+ return encoder
+}
+
+type OptionalDecoder struct {
+ ValueType reflect2.Type
+ ValueDecoder ValDecoder
+}
+
+func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if iter.ReadNil() {
+ *((*unsafe.Pointer)(ptr)) = nil
+ } else {
+ if *((*unsafe.Pointer)(ptr)) == nil {
+ // pointer is nil, so allocate memory to hold the value
+ newPtr := decoder.ValueType.UnsafeNew()
+ decoder.ValueDecoder.Decode(newPtr, iter)
+ *((*unsafe.Pointer)(ptr)) = newPtr
+ } else {
+ // reuse the existing instance
+ decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
+ }
+ }
+}
+
+type dereferenceDecoder struct {
+ // only used to dereference a pointer
+ valueType reflect2.Type
+ valueDecoder ValDecoder
+}
+
+func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if *((*unsafe.Pointer)(ptr)) == nil {
+ // pointer is nil, so allocate memory to hold the value
+ newPtr := decoder.valueType.UnsafeNew()
+ decoder.valueDecoder.Decode(newPtr, iter)
+ *((*unsafe.Pointer)(ptr)) = newPtr
+ } else {
+ // reuse the existing instance
+ decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
+ }
+}
+
+type OptionalEncoder struct {
+ ValueEncoder ValEncoder
+}
+
+func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if *((*unsafe.Pointer)(ptr)) == nil {
+ stream.WriteNil()
+ } else {
+ encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
+ }
+}
+
+func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return *((*unsafe.Pointer)(ptr)) == nil
+}
+
+type dereferenceEncoder struct {
+ ValueEncoder ValEncoder
+}
+
+func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if *((*unsafe.Pointer)(ptr)) == nil {
+ stream.WriteNil()
+ } else {
+ encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
+ }
+}
+
+func (encoder *dereferenceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ dePtr := *((*unsafe.Pointer)(ptr))
+ if dePtr == nil {
+ return true
+ }
+ return encoder.ValueEncoder.IsEmpty(dePtr)
+}
+
+func (encoder *dereferenceEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool {
+ deReferenced := *((*unsafe.Pointer)(ptr))
+ if deReferenced == nil {
+ return true
+ }
+ isEmbeddedPtrNil, converted := encoder.ValueEncoder.(IsEmbeddedPtrNil)
+ if !converted {
+ return false
+ }
+ fieldPtr := unsafe.Pointer(deReferenced)
+ return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr)
+}
+
+type referenceEncoder struct {
+ encoder ValEncoder
+}
+
+func (encoder *referenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ encoder.encoder.Encode(unsafe.Pointer(&ptr), stream)
+}
+
+func (encoder *referenceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr))
+}
+
+type referenceDecoder struct {
+ decoder ValDecoder
+}
+
+func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ decoder.decoder.Decode(unsafe.Pointer(&ptr), iter)
+}
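A short sketch of the optional (pointer) semantics above; the struct is illustrative:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	type profile struct {
		Age *int `json:"age"`
	}

	// A nil pointer encodes as null via OptionalEncoder.
	out, _ := jsoniter.MarshalToString(profile{})
	fmt.Println(out) // {"age":null}

	// OptionalDecoder allocates storage when a value arrives...
	var p profile
	_ = jsoniter.UnmarshalFromString(`{"age": 30}`, &p)
	fmt.Println(*p.Age) // 30

	// ...and resets the pointer to nil on null.
	_ = jsoniter.UnmarshalFromString(`{"age": null}`, &p)
	fmt.Println(p.Age == nil) // true
}
```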
diff --git a/vendor/github.com/json-iterator/go/reflect_slice.go b/vendor/github.com/json-iterator/go/reflect_slice.go
new file mode 100644
index 0000000..9441d79
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_slice.go
@@ -0,0 +1,99 @@
+package jsoniter
+
+import (
+ "fmt"
+ "github.com/modern-go/reflect2"
+ "io"
+ "unsafe"
+)
+
+func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder {
+ sliceType := typ.(*reflect2.UnsafeSliceType)
+ decoder := decoderOfType(ctx.append("[sliceElem]"), sliceType.Elem())
+ return &sliceDecoder{sliceType, decoder}
+}
+
+func encoderOfSlice(ctx *ctx, typ reflect2.Type) ValEncoder {
+ sliceType := typ.(*reflect2.UnsafeSliceType)
+ encoder := encoderOfType(ctx.append("[sliceElem]"), sliceType.Elem())
+ return &sliceEncoder{sliceType, encoder}
+}
+
+type sliceEncoder struct {
+ sliceType *reflect2.UnsafeSliceType
+ elemEncoder ValEncoder
+}
+
+func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ if encoder.sliceType.UnsafeIsNil(ptr) {
+ stream.WriteNil()
+ return
+ }
+ length := encoder.sliceType.UnsafeLengthOf(ptr)
+ if length == 0 {
+ stream.WriteEmptyArray()
+ return
+ }
+ stream.WriteArrayStart()
+ encoder.elemEncoder.Encode(encoder.sliceType.UnsafeGetIndex(ptr, 0), stream)
+ for i := 1; i < length; i++ {
+ stream.WriteMore()
+ elemPtr := encoder.sliceType.UnsafeGetIndex(ptr, i)
+ encoder.elemEncoder.Encode(elemPtr, stream)
+ }
+ stream.WriteArrayEnd()
+ if stream.Error != nil && stream.Error != io.EOF {
+ stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error())
+ }
+}
+
+func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.sliceType.UnsafeLengthOf(ptr) == 0
+}
+
+type sliceDecoder struct {
+ sliceType *reflect2.UnsafeSliceType
+ elemDecoder ValDecoder
+}
+
+func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ decoder.doDecode(ptr, iter)
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error())
+ }
+}
+
+func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
+ c := iter.nextToken()
+ sliceType := decoder.sliceType
+ if c == 'n' {
+ iter.skipThreeBytes('u', 'l', 'l')
+ sliceType.UnsafeSetNil(ptr)
+ return
+ }
+ if c != '[' {
+ iter.ReportError("decode slice", "expect [ or n, but found "+string([]byte{c}))
+ return
+ }
+ c = iter.nextToken()
+ if c == ']' {
+ sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0))
+ return
+ }
+ iter.unreadByte()
+ sliceType.UnsafeGrow(ptr, 1)
+ elemPtr := sliceType.UnsafeGetIndex(ptr, 0)
+ decoder.elemDecoder.Decode(elemPtr, iter)
+ length := 1
+ for c = iter.nextToken(); c == ','; c = iter.nextToken() {
+ idx := length
+ length++
+ sliceType.UnsafeGrow(ptr, length)
+ elemPtr = sliceType.UnsafeGetIndex(ptr, idx)
+ decoder.elemDecoder.Decode(elemPtr, iter)
+ }
+ if c != ']' {
+ iter.ReportError("decode slice", "expect ], but found "+string([]byte{c}))
+ return
+ }
+}
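A sketch of the decoder's `null` versus `[]` handling above (values illustrative):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	var tags []string

	// "null" leaves a nil slice (sliceType.UnsafeSetNil).
	_ = jsoniter.UnmarshalFromString(`null`, &tags)
	fmt.Println(tags == nil) // true

	// "[]" produces an empty but non-nil slice (UnsafeMakeSlice(0, 0)).
	_ = jsoniter.UnmarshalFromString(`[]`, &tags)
	fmt.Println(tags == nil, len(tags)) // false 0

	// Elements are decoded one at a time as the slice grows.
	_ = jsoniter.UnmarshalFromString(`["go","gin"]`, &tags)
	fmt.Println(tags) // [go gin]
}
```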
diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
new file mode 100644
index 0000000..5ad5cc5
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
@@ -0,0 +1,1092 @@
+package jsoniter
+
+import (
+ "fmt"
+ "io"
+ "strings"
+ "unsafe"
+
+ "github.com/modern-go/reflect2"
+)
+
+func decoderOfStruct(ctx *ctx, typ reflect2.Type) ValDecoder {
+ bindings := map[string]*Binding{}
+ structDescriptor := describeStruct(ctx, typ)
+ for _, binding := range structDescriptor.Fields {
+ for _, fromName := range binding.FromNames {
+ old := bindings[fromName]
+ if old == nil {
+ bindings[fromName] = binding
+ continue
+ }
+ ignoreOld, ignoreNew := resolveConflictBinding(ctx.frozenConfig, old, binding)
+ if ignoreOld {
+ delete(bindings, fromName)
+ }
+ if !ignoreNew {
+ bindings[fromName] = binding
+ }
+ }
+ }
+ fields := map[string]*structFieldDecoder{}
+ for k, binding := range bindings {
+ fields[k] = binding.Decoder.(*structFieldDecoder)
+ }
+
+ if !ctx.caseSensitive() {
+ for k, binding := range bindings {
+ if _, found := fields[strings.ToLower(k)]; !found {
+ fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder)
+ }
+ }
+ }
+
+ return createStructDecoder(ctx, typ, fields)
+}
+
+func createStructDecoder(ctx *ctx, typ reflect2.Type, fields map[string]*structFieldDecoder) ValDecoder {
+ if ctx.disallowUnknownFields {
+ return &generalStructDecoder{typ: typ, fields: fields, disallowUnknownFields: true}
+ }
+ knownHash := map[int64]struct{}{
+ 0: {},
+ }
+
+ switch len(fields) {
+ case 0:
+ return &skipObjectDecoder{typ}
+ case 1:
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder}
+ }
+ case 2:
+ var fieldHash1 int64
+ var fieldHash2 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldHash1 == 0 {
+ fieldHash1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else {
+ fieldHash2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ }
+ }
+ return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2}
+ case 3:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ }
+ }
+ return &threeFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3}
+ case 4:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ var fieldDecoder4 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else if fieldName3 == 0 {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ } else {
+ fieldName4 = fieldHash
+ fieldDecoder4 = fieldDecoder
+ }
+ }
+ return &fourFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3,
+ fieldName4, fieldDecoder4}
+ case 5:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
+ var fieldName5 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ var fieldDecoder4 *structFieldDecoder
+ var fieldDecoder5 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else if fieldName3 == 0 {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ } else if fieldName4 == 0 {
+ fieldName4 = fieldHash
+ fieldDecoder4 = fieldDecoder
+ } else {
+ fieldName5 = fieldHash
+ fieldDecoder5 = fieldDecoder
+ }
+ }
+ return &fiveFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3,
+ fieldName4, fieldDecoder4,
+ fieldName5, fieldDecoder5}
+ case 6:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
+ var fieldName5 int64
+ var fieldName6 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ var fieldDecoder4 *structFieldDecoder
+ var fieldDecoder5 *structFieldDecoder
+ var fieldDecoder6 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else if fieldName3 == 0 {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ } else if fieldName4 == 0 {
+ fieldName4 = fieldHash
+ fieldDecoder4 = fieldDecoder
+ } else if fieldName5 == 0 {
+ fieldName5 = fieldHash
+ fieldDecoder5 = fieldDecoder
+ } else {
+ fieldName6 = fieldHash
+ fieldDecoder6 = fieldDecoder
+ }
+ }
+ return &sixFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3,
+ fieldName4, fieldDecoder4,
+ fieldName5, fieldDecoder5,
+ fieldName6, fieldDecoder6}
+ case 7:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
+ var fieldName5 int64
+ var fieldName6 int64
+ var fieldName7 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ var fieldDecoder4 *structFieldDecoder
+ var fieldDecoder5 *structFieldDecoder
+ var fieldDecoder6 *structFieldDecoder
+ var fieldDecoder7 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else if fieldName3 == 0 {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ } else if fieldName4 == 0 {
+ fieldName4 = fieldHash
+ fieldDecoder4 = fieldDecoder
+ } else if fieldName5 == 0 {
+ fieldName5 = fieldHash
+ fieldDecoder5 = fieldDecoder
+ } else if fieldName6 == 0 {
+ fieldName6 = fieldHash
+ fieldDecoder6 = fieldDecoder
+ } else {
+ fieldName7 = fieldHash
+ fieldDecoder7 = fieldDecoder
+ }
+ }
+ return &sevenFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3,
+ fieldName4, fieldDecoder4,
+ fieldName5, fieldDecoder5,
+ fieldName6, fieldDecoder6,
+ fieldName7, fieldDecoder7}
+ case 8:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
+ var fieldName5 int64
+ var fieldName6 int64
+ var fieldName7 int64
+ var fieldName8 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ var fieldDecoder4 *structFieldDecoder
+ var fieldDecoder5 *structFieldDecoder
+ var fieldDecoder6 *structFieldDecoder
+ var fieldDecoder7 *structFieldDecoder
+ var fieldDecoder8 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else if fieldName3 == 0 {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ } else if fieldName4 == 0 {
+ fieldName4 = fieldHash
+ fieldDecoder4 = fieldDecoder
+ } else if fieldName5 == 0 {
+ fieldName5 = fieldHash
+ fieldDecoder5 = fieldDecoder
+ } else if fieldName6 == 0 {
+ fieldName6 = fieldHash
+ fieldDecoder6 = fieldDecoder
+ } else if fieldName7 == 0 {
+ fieldName7 = fieldHash
+ fieldDecoder7 = fieldDecoder
+ } else {
+ fieldName8 = fieldHash
+ fieldDecoder8 = fieldDecoder
+ }
+ }
+ return &eightFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3,
+ fieldName4, fieldDecoder4,
+ fieldName5, fieldDecoder5,
+ fieldName6, fieldDecoder6,
+ fieldName7, fieldDecoder7,
+ fieldName8, fieldDecoder8}
+ case 9:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
+ var fieldName5 int64
+ var fieldName6 int64
+ var fieldName7 int64
+ var fieldName8 int64
+ var fieldName9 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ var fieldDecoder4 *structFieldDecoder
+ var fieldDecoder5 *structFieldDecoder
+ var fieldDecoder6 *structFieldDecoder
+ var fieldDecoder7 *structFieldDecoder
+ var fieldDecoder8 *structFieldDecoder
+ var fieldDecoder9 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else if fieldName3 == 0 {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ } else if fieldName4 == 0 {
+ fieldName4 = fieldHash
+ fieldDecoder4 = fieldDecoder
+ } else if fieldName5 == 0 {
+ fieldName5 = fieldHash
+ fieldDecoder5 = fieldDecoder
+ } else if fieldName6 == 0 {
+ fieldName6 = fieldHash
+ fieldDecoder6 = fieldDecoder
+ } else if fieldName7 == 0 {
+ fieldName7 = fieldHash
+ fieldDecoder7 = fieldDecoder
+ } else if fieldName8 == 0 {
+ fieldName8 = fieldHash
+ fieldDecoder8 = fieldDecoder
+ } else {
+ fieldName9 = fieldHash
+ fieldDecoder9 = fieldDecoder
+ }
+ }
+ return &nineFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3,
+ fieldName4, fieldDecoder4,
+ fieldName5, fieldDecoder5,
+ fieldName6, fieldDecoder6,
+ fieldName7, fieldDecoder7,
+ fieldName8, fieldDecoder8,
+ fieldName9, fieldDecoder9}
+ case 10:
+ var fieldName1 int64
+ var fieldName2 int64
+ var fieldName3 int64
+ var fieldName4 int64
+ var fieldName5 int64
+ var fieldName6 int64
+ var fieldName7 int64
+ var fieldName8 int64
+ var fieldName9 int64
+ var fieldName10 int64
+ var fieldDecoder1 *structFieldDecoder
+ var fieldDecoder2 *structFieldDecoder
+ var fieldDecoder3 *structFieldDecoder
+ var fieldDecoder4 *structFieldDecoder
+ var fieldDecoder5 *structFieldDecoder
+ var fieldDecoder6 *structFieldDecoder
+ var fieldDecoder7 *structFieldDecoder
+ var fieldDecoder8 *structFieldDecoder
+ var fieldDecoder9 *structFieldDecoder
+ var fieldDecoder10 *structFieldDecoder
+ for fieldName, fieldDecoder := range fields {
+ fieldHash := calcHash(fieldName, ctx.caseSensitive())
+ _, known := knownHash[fieldHash]
+ if known {
+ return &generalStructDecoder{typ, fields, false}
+ }
+ knownHash[fieldHash] = struct{}{}
+ if fieldName1 == 0 {
+ fieldName1 = fieldHash
+ fieldDecoder1 = fieldDecoder
+ } else if fieldName2 == 0 {
+ fieldName2 = fieldHash
+ fieldDecoder2 = fieldDecoder
+ } else if fieldName3 == 0 {
+ fieldName3 = fieldHash
+ fieldDecoder3 = fieldDecoder
+ } else if fieldName4 == 0 {
+ fieldName4 = fieldHash
+ fieldDecoder4 = fieldDecoder
+ } else if fieldName5 == 0 {
+ fieldName5 = fieldHash
+ fieldDecoder5 = fieldDecoder
+ } else if fieldName6 == 0 {
+ fieldName6 = fieldHash
+ fieldDecoder6 = fieldDecoder
+ } else if fieldName7 == 0 {
+ fieldName7 = fieldHash
+ fieldDecoder7 = fieldDecoder
+ } else if fieldName8 == 0 {
+ fieldName8 = fieldHash
+ fieldDecoder8 = fieldDecoder
+ } else if fieldName9 == 0 {
+ fieldName9 = fieldHash
+ fieldDecoder9 = fieldDecoder
+ } else {
+ fieldName10 = fieldHash
+ fieldDecoder10 = fieldDecoder
+ }
+ }
+ return &tenFieldsStructDecoder{typ,
+ fieldName1, fieldDecoder1,
+ fieldName2, fieldDecoder2,
+ fieldName3, fieldDecoder3,
+ fieldName4, fieldDecoder4,
+ fieldName5, fieldDecoder5,
+ fieldName6, fieldDecoder6,
+ fieldName7, fieldDecoder7,
+ fieldName8, fieldDecoder8,
+ fieldName9, fieldDecoder9,
+ fieldName10, fieldDecoder10}
+ }
+ return &generalStructDecoder{typ, fields, false}
+}
+
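The `disallowUnknownFields` short-circuit at the top of `createStructDecoder` is reachable through the public `Config`. A minimal sketch (struct and payload illustrative):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	type login struct {
		User string `json:"user"`
	}

	// DisallowUnknownFields forces generalStructDecoder, which reports
	// any field without a binding instead of silently skipping it.
	strict := jsoniter.Config{DisallowUnknownFields: true}.Froze()

	var l login
	err := strict.UnmarshalFromString(`{"user":"manu","extra":1}`, &l)
	fmt.Println(err) // mentions: found unknown field: extra
}
```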
+type generalStructDecoder struct {
+ typ reflect2.Type
+ fields map[string]*structFieldDecoder
+ disallowUnknownFields bool
+}
+
+func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ var c byte
+ for c = ','; c == ','; c = iter.nextToken() {
+ decoder.decodeOneField(ptr, iter)
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ if c != '}' {
+ iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c}))
+ }
+ iter.decrementDepth()
+}
+
+func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) {
+ var field string
+ var fieldDecoder *structFieldDecoder
+ if iter.cfg.objectFieldMustBeSimpleString {
+ fieldBytes := iter.ReadStringAsSlice()
+ field = *(*string)(unsafe.Pointer(&fieldBytes))
+ fieldDecoder = decoder.fields[field]
+ if fieldDecoder == nil && !iter.cfg.caseSensitive {
+ fieldDecoder = decoder.fields[strings.ToLower(field)]
+ }
+ } else {
+ field = iter.ReadString()
+ fieldDecoder = decoder.fields[field]
+ if fieldDecoder == nil && !iter.cfg.caseSensitive {
+ fieldDecoder = decoder.fields[strings.ToLower(field)]
+ }
+ }
+ if fieldDecoder == nil {
+ if decoder.disallowUnknownFields {
+ msg := "found unknown field: " + field
+ iter.ReportError("ReadObject", msg)
+ }
+ c := iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+ }
+ iter.Skip()
+ return
+ }
+ c := iter.nextToken()
+ if c != ':' {
+ iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+ }
+ fieldDecoder.Decode(ptr, iter)
+}
+
+type skipObjectDecoder struct {
+ typ reflect2.Type
+}
+
+func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ valueType := iter.WhatIsNext()
+ if valueType != ObjectValue && valueType != NilValue {
+ iter.ReportError("skipObjectDecoder", "expect object or null")
+ return
+ }
+ iter.Skip()
+}
+
+type oneFieldStructDecoder struct {
+ typ reflect2.Type
+ fieldHash int64
+ fieldDecoder *structFieldDecoder
+}
+
+func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ if iter.readFieldHash() == decoder.fieldHash {
+ decoder.fieldDecoder.Decode(ptr, iter)
+ } else {
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type twoFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+}
+
+func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type threeFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+}
+
+func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type fourFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+ fieldHash4 int64
+ fieldDecoder4 *structFieldDecoder
+}
+
+func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ case decoder.fieldHash4:
+ decoder.fieldDecoder4.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type fiveFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+ fieldHash4 int64
+ fieldDecoder4 *structFieldDecoder
+ fieldHash5 int64
+ fieldDecoder5 *structFieldDecoder
+}
+
+func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ case decoder.fieldHash4:
+ decoder.fieldDecoder4.Decode(ptr, iter)
+ case decoder.fieldHash5:
+ decoder.fieldDecoder5.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type sixFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+ fieldHash4 int64
+ fieldDecoder4 *structFieldDecoder
+ fieldHash5 int64
+ fieldDecoder5 *structFieldDecoder
+ fieldHash6 int64
+ fieldDecoder6 *structFieldDecoder
+}
+
+func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ case decoder.fieldHash4:
+ decoder.fieldDecoder4.Decode(ptr, iter)
+ case decoder.fieldHash5:
+ decoder.fieldDecoder5.Decode(ptr, iter)
+ case decoder.fieldHash6:
+ decoder.fieldDecoder6.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type sevenFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+ fieldHash4 int64
+ fieldDecoder4 *structFieldDecoder
+ fieldHash5 int64
+ fieldDecoder5 *structFieldDecoder
+ fieldHash6 int64
+ fieldDecoder6 *structFieldDecoder
+ fieldHash7 int64
+ fieldDecoder7 *structFieldDecoder
+}
+
+func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ case decoder.fieldHash4:
+ decoder.fieldDecoder4.Decode(ptr, iter)
+ case decoder.fieldHash5:
+ decoder.fieldDecoder5.Decode(ptr, iter)
+ case decoder.fieldHash6:
+ decoder.fieldDecoder6.Decode(ptr, iter)
+ case decoder.fieldHash7:
+ decoder.fieldDecoder7.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type eightFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+ fieldHash4 int64
+ fieldDecoder4 *structFieldDecoder
+ fieldHash5 int64
+ fieldDecoder5 *structFieldDecoder
+ fieldHash6 int64
+ fieldDecoder6 *structFieldDecoder
+ fieldHash7 int64
+ fieldDecoder7 *structFieldDecoder
+ fieldHash8 int64
+ fieldDecoder8 *structFieldDecoder
+}
+
+func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ case decoder.fieldHash4:
+ decoder.fieldDecoder4.Decode(ptr, iter)
+ case decoder.fieldHash5:
+ decoder.fieldDecoder5.Decode(ptr, iter)
+ case decoder.fieldHash6:
+ decoder.fieldDecoder6.Decode(ptr, iter)
+ case decoder.fieldHash7:
+ decoder.fieldDecoder7.Decode(ptr, iter)
+ case decoder.fieldHash8:
+ decoder.fieldDecoder8.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type nineFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+ fieldHash4 int64
+ fieldDecoder4 *structFieldDecoder
+ fieldHash5 int64
+ fieldDecoder5 *structFieldDecoder
+ fieldHash6 int64
+ fieldDecoder6 *structFieldDecoder
+ fieldHash7 int64
+ fieldDecoder7 *structFieldDecoder
+ fieldHash8 int64
+ fieldDecoder8 *structFieldDecoder
+ fieldHash9 int64
+ fieldDecoder9 *structFieldDecoder
+}
+
+func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ case decoder.fieldHash4:
+ decoder.fieldDecoder4.Decode(ptr, iter)
+ case decoder.fieldHash5:
+ decoder.fieldDecoder5.Decode(ptr, iter)
+ case decoder.fieldHash6:
+ decoder.fieldDecoder6.Decode(ptr, iter)
+ case decoder.fieldHash7:
+ decoder.fieldDecoder7.Decode(ptr, iter)
+ case decoder.fieldHash8:
+ decoder.fieldDecoder8.Decode(ptr, iter)
+ case decoder.fieldHash9:
+ decoder.fieldDecoder9.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
+type tenFieldsStructDecoder struct {
+ typ reflect2.Type
+ fieldHash1 int64
+ fieldDecoder1 *structFieldDecoder
+ fieldHash2 int64
+ fieldDecoder2 *structFieldDecoder
+ fieldHash3 int64
+ fieldDecoder3 *structFieldDecoder
+ fieldHash4 int64
+ fieldDecoder4 *structFieldDecoder
+ fieldHash5 int64
+ fieldDecoder5 *structFieldDecoder
+ fieldHash6 int64
+ fieldDecoder6 *structFieldDecoder
+ fieldHash7 int64
+ fieldDecoder7 *structFieldDecoder
+ fieldHash8 int64
+ fieldDecoder8 *structFieldDecoder
+ fieldHash9 int64
+ fieldDecoder9 *structFieldDecoder
+ fieldHash10 int64
+ fieldDecoder10 *structFieldDecoder
+}
+
+func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ if !iter.readObjectStart() {
+ return
+ }
+ if !iter.incrementDepth() {
+ return
+ }
+ for {
+ switch iter.readFieldHash() {
+ case decoder.fieldHash1:
+ decoder.fieldDecoder1.Decode(ptr, iter)
+ case decoder.fieldHash2:
+ decoder.fieldDecoder2.Decode(ptr, iter)
+ case decoder.fieldHash3:
+ decoder.fieldDecoder3.Decode(ptr, iter)
+ case decoder.fieldHash4:
+ decoder.fieldDecoder4.Decode(ptr, iter)
+ case decoder.fieldHash5:
+ decoder.fieldDecoder5.Decode(ptr, iter)
+ case decoder.fieldHash6:
+ decoder.fieldDecoder6.Decode(ptr, iter)
+ case decoder.fieldHash7:
+ decoder.fieldDecoder7.Decode(ptr, iter)
+ case decoder.fieldHash8:
+ decoder.fieldDecoder8.Decode(ptr, iter)
+ case decoder.fieldHash9:
+ decoder.fieldDecoder9.Decode(ptr, iter)
+ case decoder.fieldHash10:
+ decoder.fieldDecoder10.Decode(ptr, iter)
+ default:
+ iter.Skip()
+ }
+ if iter.isObjectEnd() {
+ break
+ }
+ }
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+ }
+ iter.decrementDepth()
+}
+
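+// Structs with more than ten fields fall back to the general struct decoder,
+// which resolves field names through a map lookup instead of a hash switch.
+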
+type structFieldDecoder struct {
+ field reflect2.StructField
+ fieldDecoder ValDecoder
+}
+
+func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ fieldPtr := decoder.field.UnsafeGet(ptr)
+ decoder.fieldDecoder.Decode(fieldPtr, iter)
+ if iter.Error != nil && iter.Error != io.EOF {
+ iter.Error = fmt.Errorf("%s: %s", decoder.field.Name(), iter.Error.Error())
+ }
+}
+
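+// stringModeStringDecoder handles `json:",string"` applied to a string field:
+// the element decoder reads the outer JSON string, then a temporary iterator
+// decodes the inner quoted string it contains.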
+type stringModeStringDecoder struct {
+ elemDecoder ValDecoder
+ cfg *frozenConfig
+}
+
+func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ decoder.elemDecoder.Decode(ptr, iter)
+ str := *((*string)(ptr))
+ tempIter := decoder.cfg.BorrowIterator([]byte(str))
+ defer decoder.cfg.ReturnIterator(tempIter)
+ *((*string)(ptr)) = tempIter.ReadString()
+}
+
+type stringModeNumberDecoder struct {
+ elemDecoder ValDecoder
+}
+
+func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+ c := iter.nextToken()
+ if c != '"' {
+ iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))
+ return
+ }
+ decoder.elemDecoder.Decode(ptr, iter)
+ if iter.Error != nil {
+ return
+ }
+ c = iter.readByte()
+ if c != '"' {
+ iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))
+ return
+ }
+}
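+
+// For illustration only (a sketch, not part of the upstream sources): given
+//
+//	type Order struct {
+//		ID int64 `json:"id,string"`
+//	}
+//
+// the input {"id":"42"} reaches stringModeNumberDecoder, which consumes the
+// surrounding quotes and delegates the bare digits to the int64 decoder.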
diff --git a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go
new file mode 100644
index 0000000..152e3ef
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go
@@ -0,0 +1,211 @@
+package jsoniter
+
+import (
+ "fmt"
+ "github.com/modern-go/reflect2"
+ "io"
+ "reflect"
+ "unsafe"
+)
+
+func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder {
+ type bindingTo struct {
+ binding *Binding
+ toName string
+ ignored bool
+ }
+ orderedBindings := []*bindingTo{}
+ structDescriptor := describeStruct(ctx, typ)
+ for _, binding := range structDescriptor.Fields {
+ for _, toName := range binding.ToNames {
+ new := &bindingTo{
+ binding: binding,
+ toName: toName,
+ }
+ for _, old := range orderedBindings {
+ if old.toName != toName {
+ continue
+ }
+ old.ignored, new.ignored = resolveConflictBinding(ctx.frozenConfig, old.binding, new.binding)
+ }
+ orderedBindings = append(orderedBindings, new)
+ }
+ }
+ if len(orderedBindings) == 0 {
+ return &emptyStructEncoder{}
+ }
+ finalOrderedFields := []structFieldTo{}
+ for _, bindingTo := range orderedBindings {
+ if !bindingTo.ignored {
+ finalOrderedFields = append(finalOrderedFields, structFieldTo{
+ encoder: bindingTo.binding.Encoder.(*structFieldEncoder),
+ toName: bindingTo.toName,
+ })
+ }
+ }
+ return &structEncoder{typ, finalOrderedFields}
+}
+
+func createCheckIsEmpty(ctx *ctx, typ reflect2.Type) checkIsEmpty {
+ encoder := createEncoderOfNative(ctx, typ)
+ if encoder != nil {
+ return encoder
+ }
+ kind := typ.Kind()
+ switch kind {
+ case reflect.Interface:
+ return &dynamicEncoder{typ}
+ case reflect.Struct:
+ return &structEncoder{typ: typ}
+ case reflect.Array:
+ return &arrayEncoder{}
+ case reflect.Slice:
+ return &sliceEncoder{}
+ case reflect.Map:
+ return encoderOfMap(ctx, typ)
+ case reflect.Ptr:
+ return &OptionalEncoder{}
+ default:
+ return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)}
+ }
+}
+
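+// resolveConflictBinding decides which of two bindings with the same JSON
+// name survives: presence of a struct tag and a shallower embedding level are
+// the tie-breakers, and an exact tie hides both fields.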
+func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) {
+ newTagged := new.Field.Tag().Get(cfg.getTagKey()) != ""
+ oldTagged := old.Field.Tag().Get(cfg.getTagKey()) != ""
+ if newTagged {
+ if oldTagged {
+ if len(old.levels) > len(new.levels) {
+ return true, false
+ } else if len(new.levels) > len(old.levels) {
+ return false, true
+ } else {
+ return true, true
+ }
+ } else {
+ return true, false
+ }
+ } else {
+ if oldTagged {
+ return true, false
+ }
+ if len(old.levels) > len(new.levels) {
+ return true, false
+ } else if len(new.levels) > len(old.levels) {
+ return false, true
+ } else {
+ return true, true
+ }
+ }
+}
+
+type structFieldEncoder struct {
+ field reflect2.StructField
+ fieldEncoder ValEncoder
+ omitempty bool
+}
+
+func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ fieldPtr := encoder.field.UnsafeGet(ptr)
+ encoder.fieldEncoder.Encode(fieldPtr, stream)
+ if stream.Error != nil && stream.Error != io.EOF {
+ stream.Error = fmt.Errorf("%s: %s", encoder.field.Name(), stream.Error.Error())
+ }
+}
+
+func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ fieldPtr := encoder.field.UnsafeGet(ptr)
+ return encoder.fieldEncoder.IsEmpty(fieldPtr)
+}
+
+func (encoder *structFieldEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool {
+ isEmbeddedPtrNil, converted := encoder.fieldEncoder.(IsEmbeddedPtrNil)
+ if !converted {
+ return false
+ }
+ fieldPtr := encoder.field.UnsafeGet(ptr)
+ return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr)
+}
+
+type IsEmbeddedPtrNil interface {
+ IsEmbeddedPtrNil(ptr unsafe.Pointer) bool
+}
+
+type structEncoder struct {
+ typ reflect2.Type
+ fields []structFieldTo
+}
+
+type structFieldTo struct {
+ encoder *structFieldEncoder
+ toName string
+}
+
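+// Encode writes the struct as a JSON object. The separating comma is emitted
+// via WriteMore only after a first field has actually been written, so
+// omitempty and nil-embedded-pointer skips never produce dangling commas.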
+func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteObjectStart()
+ isNotFirst := false
+ for _, field := range encoder.fields {
+ if field.encoder.omitempty && field.encoder.IsEmpty(ptr) {
+ continue
+ }
+ if field.encoder.IsEmbeddedPtrNil(ptr) {
+ continue
+ }
+ if isNotFirst {
+ stream.WriteMore()
+ }
+ stream.WriteObjectField(field.toName)
+ field.encoder.Encode(ptr, stream)
+ isNotFirst = true
+ }
+ stream.WriteObjectEnd()
+ if stream.Error != nil && stream.Error != io.EOF {
+ stream.Error = fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error())
+ }
+}
+
+func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return false
+}
+
+type emptyStructEncoder struct {
+}
+
+func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.WriteEmptyObject()
+}
+
+func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return false
+}
+
+type stringModeNumberEncoder struct {
+ elemEncoder ValEncoder
+}
+
+func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ stream.writeByte('"')
+ encoder.elemEncoder.Encode(ptr, stream)
+ stream.writeByte('"')
+}
+
+func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.elemEncoder.IsEmpty(ptr)
+}
+
+type stringModeStringEncoder struct {
+ elemEncoder ValEncoder
+ cfg *frozenConfig
+}
+
+func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+ tempStream := encoder.cfg.BorrowStream(nil)
+ tempStream.Attachment = stream.Attachment
+ defer encoder.cfg.ReturnStream(tempStream)
+ encoder.elemEncoder.Encode(ptr, tempStream)
+ stream.WriteString(string(tempStream.Buffer()))
+}
+
+func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+ return encoder.elemEncoder.IsEmpty(ptr)
+}
diff --git a/vendor/github.com/json-iterator/go/stream.go b/vendor/github.com/json-iterator/go/stream.go
new file mode 100644
index 0000000..17662fd
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/stream.go
@@ -0,0 +1,211 @@
+package jsoniter
+
+import (
+ "io"
+)
+
+// Stream is an io.Writer-like object with JSON-specific write functions.
+// Errors are not returned as values; they are stored in the Error field of
+// this stream instance.
+type Stream struct {
+ cfg *frozenConfig
+ out io.Writer
+ buf []byte
+ Error error
+ indention int
+ Attachment interface{} // open for customized encoder
+}
+
+// NewStream creates a new stream instance.
+// cfg can be jsoniter.ConfigDefault.
+// out can be nil to write to the internal buffer only.
+// bufSize is the initial size of the internal buffer in bytes.
+func NewStream(cfg API, out io.Writer, bufSize int) *Stream {
+ return &Stream{
+ cfg: cfg.(*frozenConfig),
+ out: out,
+ buf: make([]byte, 0, bufSize),
+ Error: nil,
+ indention: 0,
+ }
+}
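+
+// A minimal usage sketch (illustrative, assuming the exported jsoniter API):
+//
+//	stream := NewStream(ConfigDefault, nil, 64) // nil writer: keep output in memory
+//	stream.WriteObjectStart()
+//	stream.WriteObjectField("ok")
+//	stream.WriteBool(true)
+//	stream.WriteObjectEnd()
+//	out := stream.Buffer() // {"ok":true}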
+
+// Pool returns a pool that can provide more streams with the same configuration.
+func (stream *Stream) Pool() StreamPool {
+ return stream.cfg
+}
+
+// Reset reuses this stream instance by assigning a new writer.
+func (stream *Stream) Reset(out io.Writer) {
+ stream.out = out
+ stream.buf = stream.buf[:0]
+}
+
+// Available returns how many bytes are unused in the buffer.
+func (stream *Stream) Available() int {
+ return cap(stream.buf) - len(stream.buf)
+}
+
+// Buffered returns the number of bytes that have been written into the current buffer.
+func (stream *Stream) Buffered() int {
+ return len(stream.buf)
+}
+
+// Buffer returns the internal buffer; if the writer is nil, use this method to take the result.
+func (stream *Stream) Buffer() []byte {
+ return stream.buf
+}
+
+// SetBuffer allows appending to the internal buffer directly.
+func (stream *Stream) SetBuffer(buf []byte) {
+ stream.buf = buf
+}
+
+// Write writes the contents of p into the buffer.
+// It returns the number of bytes written.
+// If nn < len(p), it also returns an error explaining
+// why the write is short.
+func (stream *Stream) Write(p []byte) (nn int, err error) {
+ stream.buf = append(stream.buf, p...)
+ if stream.out != nil {
+ nn, err = stream.out.Write(stream.buf)
+ stream.buf = stream.buf[nn:]
+ return
+ }
+ return len(p), nil
+}
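+
+// Note: Write appends p to the internal buffer first; when an underlying
+// writer is present, the whole buffer is handed to it and only the unwritten
+// remainder is retained.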
+
+// writeByte appends a single byte to the buffer.
+func (stream *Stream) writeByte(c byte) {
+ stream.buf = append(stream.buf, c)
+}
+
+func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) {
+ stream.buf = append(stream.buf, c1, c2)
+}
+
+func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) {
+ stream.buf = append(stream.buf, c1, c2, c3)
+}
+
+func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) {
+ stream.buf = append(stream.buf, c1, c2, c3, c4)
+}
+
+func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) {
+ stream.buf = append(stream.buf, c1, c2, c3, c4, c5)
+}
+
+// Flush writes any buffered data to the underlying io.Writer.
+func (stream *Stream) Flush() error {
+ if stream.out == nil {
+ return nil
+ }
+ if stream.Error != nil {
+ return stream.Error
+ }
+ n, err := stream.out.Write(stream.buf)
+ if err != nil {
+ if stream.Error == nil {
+ stream.Error = err
+ }
+ return err
+ }
+ stream.buf = stream.buf[n:]
+ return nil
+}
+
+// WriteRaw writes the string out without quotes, just like raw []byte.
+func (stream *Stream) WriteRaw(s string) {
+ stream.buf = append(stream.buf, s...)
+}
+
+// WriteNil writes null to the stream.
+func (stream *Stream) WriteNil() {
+ stream.writeFourBytes('n', 'u', 'l', 'l')
+}
+
+// WriteTrue writes true to the stream.
+func (stream *Stream) WriteTrue() {
+ stream.writeFourBytes('t', 'r', 'u', 'e')
+}
+
+// WriteFalse writes false to the stream.
+func (stream *Stream) WriteFalse() {
+ stream.writeFiveBytes('f', 'a', 'l', 's', 'e')
+}
+
+// WriteBool writes true or false to the stream.
+func (stream *Stream) WriteBool(val bool) {
+ if val {
+ stream.WriteTrue()
+ } else {
+ stream.WriteFalse()
+ }
+}
+
+// WriteObjectStart writes { with possible indentation.
+func (stream *Stream) WriteObjectStart() {
+ stream.indention += stream.cfg.indentionStep
+ stream.writeByte('{')
+ stream.writeIndention(0)
+}
+
+// WriteObjectField writes "field": with possible indentation.
+func (stream *Stream) WriteObjectField(field string) {
+ stream.WriteString(field)
+ if stream.indention > 0 {
+ stream.writeTwoBytes(':', ' ')
+ } else {
+ stream.writeByte(':')
+ }
+}
+
+// WriteObjectEnd writes } with possible indentation.
+func (stream *Stream) WriteObjectEnd() {
+ stream.writeIndention(stream.cfg.indentionStep)
+ stream.indention -= stream.cfg.indentionStep
+ stream.writeByte('}')
+}
+
+// WriteEmptyObject writes {}.
+func (stream *Stream) WriteEmptyObject() {
+ stream.writeByte('{')
+ stream.writeByte('}')
+}
+
+// WriteMore writes , with possible indentation.
+func (stream *Stream) WriteMore() {
+ stream.writeByte(',')
+ stream.writeIndention(0)
+ stream.Flush()
+}
+
+// WriteArrayStart writes [ with possible indentation.
+func (stream *Stream) WriteArrayStart() {
+ stream.indention += stream.cfg.indentionStep
+ stream.writeByte('[')
+ stream.writeIndention(0)
+}
+
+// WriteEmptyArray writes [].
+func (stream *Stream) WriteEmptyArray() {
+ stream.writeTwoBytes('[', ']')
+}
+
+// WriteArrayEnd writes ] with possible indentation.
+func (stream *Stream) WriteArrayEnd() {
+ stream.writeIndention(stream.cfg.indentionStep)
+ stream.indention -= stream.cfg.indentionStep
+ stream.writeByte(']')
+}
+
+func (stream *Stream) writeIndention(delta int) {
+ if stream.indention == 0 {
+ return
+ }
+ stream.writeByte('\n')
+ toWrite := stream.indention - delta
+ for i := 0; i < toWrite; i++ {
+ stream.buf = append(stream.buf, ' ')
+ }
+}
diff --git a/vendor/github.com/json-iterator/go/stream_float.go b/vendor/github.com/json-iterator/go/stream_float.go
new file mode 100644
index 0000000..826aa59
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/stream_float.go
@@ -0,0 +1,111 @@
+package jsoniter
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+)
+
+var pow10 []uint64
+
+func init() {
+ pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000}
+}
+
+// WriteFloat32 writes float32 to the stream.
+func (stream *Stream) WriteFloat32(val float32) {
+ if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) {
+ stream.Error = fmt.Errorf("unsupported value: %f", val)
+ return
+ }
+ abs := math.Abs(float64(val))
+ fmt := byte('f')
+	// Note: must compare as float32 so the cutoffs are exact for the underlying float32 value.
+ if abs != 0 {
+ if float32(abs) < 1e-6 || float32(abs) >= 1e21 {
+ fmt = 'e'
+ }
+ }
+ stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 32)
+}
+
+// WriteFloat32Lossy writes float32 to the stream with ONLY 6 digits of precision; it is much faster.
+func (stream *Stream) WriteFloat32Lossy(val float32) {
+ if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) {
+ stream.Error = fmt.Errorf("unsupported value: %f", val)
+ return
+ }
+ if val < 0 {
+ stream.writeByte('-')
+ val = -val
+ }
+ if val > 0x4ffffff {
+ stream.WriteFloat32(val)
+ return
+ }
+ precision := 6
+ exp := uint64(1000000) // 6
+ lval := uint64(float64(val)*float64(exp) + 0.5)
+ stream.WriteUint64(lval / exp)
+ fval := lval % exp
+ if fval == 0 {
+ return
+ }
+ stream.writeByte('.')
+ for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
+ stream.writeByte('0')
+ }
+ stream.WriteUint64(fval)
+ for stream.buf[len(stream.buf)-1] == '0' {
+ stream.buf = stream.buf[:len(stream.buf)-1]
+ }
+}
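+
+// Worked example: WriteFloat32Lossy(3.14) computes lval = 3140000, writes the
+// integer part "3", then the fractional digits "140000", and finally trims
+// the trailing zeros, leaving "3.14".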
+
+// WriteFloat64 writes float64 to the stream.
+func (stream *Stream) WriteFloat64(val float64) {
+ if math.IsInf(val, 0) || math.IsNaN(val) {
+ stream.Error = fmt.Errorf("unsupported value: %f", val)
+ return
+ }
+ abs := math.Abs(val)
+ fmt := byte('f')
+	// Use scientific notation outside [1e-6, 1e21), matching encoding/json.
+ if abs != 0 {
+ if abs < 1e-6 || abs >= 1e21 {
+ fmt = 'e'
+ }
+ }
+ stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 64)
+}
+
+// WriteFloat64Lossy writes float64 to the stream with ONLY 6 digits of precision; it is much faster.
+func (stream *Stream) WriteFloat64Lossy(val float64) {
+ if math.IsInf(val, 0) || math.IsNaN(val) {
+ stream.Error = fmt.Errorf("unsupported value: %f", val)
+ return
+ }
+ if val < 0 {
+ stream.writeByte('-')
+ val = -val
+ }
+ if val > 0x4ffffff {
+ stream.WriteFloat64(val)
+ return
+ }
+ precision := 6
+ exp := uint64(1000000) // 6
+ lval := uint64(val*float64(exp) + 0.5)
+ stream.WriteUint64(lval / exp)
+ fval := lval % exp
+ if fval == 0 {
+ return
+ }
+ stream.writeByte('.')
+ for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
+ stream.writeByte('0')
+ }
+ stream.WriteUint64(fval)
+ for stream.buf[len(stream.buf)-1] == '0' {
+ stream.buf = stream.buf[:len(stream.buf)-1]
+ }
+}
diff --git a/vendor/github.com/json-iterator/go/stream_int.go b/vendor/github.com/json-iterator/go/stream_int.go
new file mode 100644
index 0000000..d1059ee
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/stream_int.go
@@ -0,0 +1,190 @@
+package jsoniter
+
+var digits []uint32
+
+func init() {
+ digits = make([]uint32, 1000)
+ for i := uint32(0); i < 1000; i++ {
+ digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0'
+ if i < 10 {
+ digits[i] += 2 << 24
+ } else if i < 100 {
+ digits[i] += 1 << 24
+ }
+ }
+}
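+
+// Each digits entry packs the three ASCII digit bytes of i into the low 24
+// bits, plus a count of leading zeros to skip in the top byte: digits[7] is
+// 2<<24|'0'<<16|'0'<<8|'7', so writeFirstBuf emits just "7".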
+
+func writeFirstBuf(space []byte, v uint32) []byte {
+ start := v >> 24
+ if start == 0 {
+ space = append(space, byte(v>>16), byte(v>>8))
+ } else if start == 1 {
+ space = append(space, byte(v>>8))
+ }
+ space = append(space, byte(v))
+ return space
+}
+
+func writeBuf(buf []byte, v uint32) []byte {
+ return append(buf, byte(v>>16), byte(v>>8), byte(v))
+}
+
+// WriteUint8 writes uint8 to the stream.
+func (stream *Stream) WriteUint8(val uint8) {
+ stream.buf = writeFirstBuf(stream.buf, digits[val])
+}
+
+// WriteInt8 writes int8 to the stream.
+func (stream *Stream) WriteInt8(nval int8) {
+ var val uint8
+ if nval < 0 {
+ val = uint8(-nval)
+ stream.buf = append(stream.buf, '-')
+ } else {
+ val = uint8(nval)
+ }
+ stream.buf = writeFirstBuf(stream.buf, digits[val])
+}
+
+// WriteUint16 writes uint16 to the stream.
+func (stream *Stream) WriteUint16(val uint16) {
+ q1 := val / 1000
+ if q1 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[val])
+ return
+ }
+ r1 := val - q1*1000
+ stream.buf = writeFirstBuf(stream.buf, digits[q1])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+}
+
+// WriteInt16 writes int16 to the stream.
+func (stream *Stream) WriteInt16(nval int16) {
+ var val uint16
+ if nval < 0 {
+ val = uint16(-nval)
+ stream.buf = append(stream.buf, '-')
+ } else {
+ val = uint16(nval)
+ }
+ stream.WriteUint16(val)
+}
+
+// WriteUint32 writes uint32 to the stream.
+func (stream *Stream) WriteUint32(val uint32) {
+ q1 := val / 1000
+ if q1 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[val])
+ return
+ }
+ r1 := val - q1*1000
+ q2 := q1 / 1000
+ if q2 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[q1])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+ return
+ }
+ r2 := q1 - q2*1000
+ q3 := q2 / 1000
+ if q3 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[q2])
+ } else {
+ r3 := q2 - q3*1000
+ stream.buf = append(stream.buf, byte(q3+'0'))
+ stream.buf = writeBuf(stream.buf, digits[r3])
+ }
+ stream.buf = writeBuf(stream.buf, digits[r2])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+}
+
+// WriteInt32 writes int32 to the stream.
+func (stream *Stream) WriteInt32(nval int32) {
+ var val uint32
+ if nval < 0 {
+ val = uint32(-nval)
+ stream.buf = append(stream.buf, '-')
+ } else {
+ val = uint32(nval)
+ }
+ stream.WriteUint32(val)
+}
+
+// WriteUint64 writes uint64 to the stream.
+func (stream *Stream) WriteUint64(val uint64) {
+ q1 := val / 1000
+ if q1 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[val])
+ return
+ }
+ r1 := val - q1*1000
+ q2 := q1 / 1000
+ if q2 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[q1])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+ return
+ }
+ r2 := q1 - q2*1000
+ q3 := q2 / 1000
+ if q3 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[q2])
+ stream.buf = writeBuf(stream.buf, digits[r2])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+ return
+ }
+ r3 := q2 - q3*1000
+ q4 := q3 / 1000
+ if q4 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[q3])
+ stream.buf = writeBuf(stream.buf, digits[r3])
+ stream.buf = writeBuf(stream.buf, digits[r2])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+ return
+ }
+ r4 := q3 - q4*1000
+ q5 := q4 / 1000
+ if q5 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[q4])
+ stream.buf = writeBuf(stream.buf, digits[r4])
+ stream.buf = writeBuf(stream.buf, digits[r3])
+ stream.buf = writeBuf(stream.buf, digits[r2])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+ return
+ }
+ r5 := q4 - q5*1000
+ q6 := q5 / 1000
+ if q6 == 0 {
+ stream.buf = writeFirstBuf(stream.buf, digits[q5])
+ } else {
+ stream.buf = writeFirstBuf(stream.buf, digits[q6])
+ r6 := q5 - q6*1000
+ stream.buf = writeBuf(stream.buf, digits[r6])
+ }
+ stream.buf = writeBuf(stream.buf, digits[r5])
+ stream.buf = writeBuf(stream.buf, digits[r4])
+ stream.buf = writeBuf(stream.buf, digits[r3])
+ stream.buf = writeBuf(stream.buf, digits[r2])
+ stream.buf = writeBuf(stream.buf, digits[r1])
+}
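+
+// WriteUint64 above peels the value into base-1000 chunks (q1..q6) so every
+// chunk maps onto one precomputed three-digit entry in the digits table; the
+// leading chunk goes through writeFirstBuf to suppress leading zeros.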
+
+// WriteInt64 writes int64 to the stream.
+func (stream *Stream) WriteInt64(nval int64) {
+ var val uint64
+ if nval < 0 {
+ val = uint64(-nval)
+ stream.buf = append(stream.buf, '-')
+ } else {
+ val = uint64(nval)
+ }
+ stream.WriteUint64(val)
+}
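+
+// For math.MinInt64 the negation above wraps, but converting the wrapped
+// value to uint64 still yields the correct absolute magnitude.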
+
+// WriteInt writes int to the stream.
+func (stream *Stream) WriteInt(val int) {
+ stream.WriteInt64(int64(val))
+}
+
+// WriteUint writes uint to the stream.
+func (stream *Stream) WriteUint(val uint) {
+ stream.WriteUint64(uint64(val))
+}
diff --git a/vendor/github.com/json-iterator/go/stream_str.go b/vendor/github.com/json-iterator/go/stream_str.go
new file mode 100644
index 0000000..54c2ba0
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/stream_str.go
@@ -0,0 +1,372 @@
+package jsoniter
+
+import (
+ "unicode/utf8"
+)
+
+// htmlSafeSet holds the value true if the ASCII character with the given
+// array position can be safely represented inside a JSON string, embedded
+// inside of HTML