diff --git a/dsl/graphql/interaction.go b/dsl/graphql/interaction.go
new file mode 100644
index 000000000..5d233bfad
--- /dev/null
+++ b/dsl/graphql/interaction.go
@@ -0,0 +1,139 @@
+package graphql
+
+import (
+	"fmt"
+	"regexp"
+
+	"github.com/pact-foundation/pact-go/dsl"
+)
+
+// Variables represents values to be substituted into the query
+type Variables map[string]interface{}
+
+// Query models a GraphQL request for a Pact interaction.
+type Query struct {
+	// HTTP Headers
+	Headers dsl.MapMatcher
+
+	// Path to GraphQL endpoint
+	Path dsl.Matcher
+
+	// HTTP Query String
+	QueryString dsl.MapMatcher
+
+	// GraphQL Query
+	Query string
+
+	// GraphQL Variables
+	Variables Variables
+
+	// GraphQL Operation
+	Operation string
+
+	// HTTP method (usually POST, but can be GET with a query string)
+	// NOTE: for query string users, the standard HTTP interaction should suffice
+	Method string
+
+	// Supports GraphQL extensions such as https://www.apollographql.com/docs/apollo-server/performance/apq/
+	Extensions Extensions
+}
+type Extensions map[string]interface{}
+
+// WithOperation specifies the GraphQL operation name (if any)
+func (r *Query) WithOperation(operation string) *Query {
+	r.Operation = operation
+
+	return r
+}
+
+// WithContentType overrides the default content-type (application/json)
+// for the GraphQL Query
+func (r *Query) WithContentType(contentType dsl.Matcher) *Query {
+	r.setHeader("content-type", contentType)
+
+	return r
+}
+
+// WithMethod specifies the HTTP method (defaults to POST)
+func (r *Query) WithMethod(method string) *Query {
+	r.Method = method
+
+	return r
+}
+
+// WithQuery specifies the GraphQL query to send
+func (r *Query) WithQuery(query string) *Query {
+	r.Query = query
+
+	return r
+}
+
+// WithVariables specifies the variables to be substituted into the query
+func (r *Query) WithVariables(variables Variables) *Query {
+	r.Variables = variables
+
+	return r
+}
+
+// WithExtensions sets the GraphQL query extensions
+func (r *Query) WithExtensions(extensions Extensions) *Query {
+	r.Extensions = extensions
+
+	return r
+}
+
+var defaultHeaders = dsl.MapMatcher{"content-type": dsl.String("application/json")}
+
+func (r *Query) setHeader(headerName string, value dsl.Matcher) *Query {
+	if r.Headers == nil {
+		r.Headers = defaultHeaders
+	}
+
+	r.Headers[headerName] = value
+
+	return r
+}
+
+// Interaction constructs a Pact HTTP request for a GraphQL interaction
+func Interaction(request Query) *dsl.Request {
+	if request.Headers == nil {
+		request.Headers = defaultHeaders
+	}
+
+	return &dsl.Request{
+		Method: request.Method,
+		Path:   request.Path,
+		Query:  request.QueryString,
+		Body: graphQLQueryBody{
+			Operation: request.Operation,
+			Query:     dsl.Regex(request.Query, escapeGraphQlQuery(request.Query)),
+			Variables: request.Variables,
+		},
+		Headers: request.Headers,
+	}
+
+}
+
+type graphQLQueryBody struct {
+	Operation string      `json:"operationName,omitempty"`
+	Query     dsl.Matcher `json:"query"`
+	Variables Variables   `json:"variables,omitempty"`
+}
+
+func escapeSpace(s string) string {
+	r := regexp.MustCompile(`\s+`)
+	return r.ReplaceAllString(s, `\s*`)
+}
+
+func escapeRegexChars(s string) string {
+	r := regexp.MustCompile(`(?m)[\-\[\]\/\{\}\(\)\*\+\?\.\\\^\$\|]`)
+
+	f := func(s string) string {
+		return fmt.Sprintf(`\%s`, s)
+	}
+	return r.ReplaceAllStringFunc(s, f)
+}
+
+func escapeGraphQlQuery(s string) string {
+	return escapeSpace(escapeRegexChars(s))
+}
diff --git a/examples/graphql/consumer/graphql_consumer_test.go b/examples/graphql/consumer/graphql_consumer_test.go
new file mode 100644
index 000000000..d84315b49
--- /dev/null
+++ b/examples/graphql/consumer/graphql_consumer_test.go
@@ -0,0 +1,138 @@
+package consumer
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"net/http"
+	"testing"
+
+	graphqlserver "github.com/graph-gophers/graphql-go"
+	"github.com/graph-gophers/graphql-go/example/starwars"
+	"github.com/graph-gophers/graphql-go/relay"
+	graphql "github.com/hasura/go-graphql-client"
+	"github.com/pact-foundation/pact-go/dsl"
+	g "github.com/pact-foundation/pact-go/dsl/graphql"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestGraphQLConsumer(t *testing.T) {
+	// Create Pact connecting to local Daemon
+	pact := &dsl.Pact{
+		Consumer: "GraphQLConsumer",
+		Provider: "GraphQLProvider",
+		Host:     "localhost",
+	}
+	defer pact.Teardown()
+
+	// Set up our expected interactions.
+	pact.
+		AddInteraction().
+		Given("User foo exists").
+		UponReceiving("A request to get foo").
+		WithRequest(*g.Interaction(g.Query{
+			Method: "POST",
+			Path:   dsl.String("/query"),
+			Query: `query ($characterID:ID!){
+				hero {
+					id,
+					name
+				},
+				character(id: $characterID)
+				{
+					name,
+					friends{
+						name,
+						__typename
+					},
+					appearsIn
+				}
+			}`,
+			// Operation: "SomeOperation", // if needed
+			Variables: g.Variables{
+				"characterID": "1003",
+			},
+		})).
+		WillRespondWith(dsl.Response{
+			Status:  200,
+			Headers: dsl.MapMatcher{"Content-Type": dsl.String("application/json")},
+			Body: g.Response{
+				Data: heroQuery{
+					Hero: hero{
+						ID:   graphql.ID("1003"),
+						Name: "Darth Vader",
+					},
+					Character: character{
+						Name: "Darth Vader",
+						AppearsIn: []graphql.String{
+							"EMPIRE",
+						},
+						Friends: []friend{
+							{
+								Name:     "Wilhuff Tarkin",
+								Typename: "friends",
+							},
+						},
+					},
+				},
+			}})
+
+	// assert on the response
+	var test = func() error {
+		res, err := executeQuery(fmt.Sprintf("http://localhost:%d", pact.Server.Port))
+
+		fmt.Println(res)
+		assert.NoError(t, err)
+		assert.NotNil(t, res.Hero.ID)
+
+		return nil
+	}
+
+	// Verify
+	if err := pact.Verify(test); err != nil {
+		log.Fatalf("Error on Verify: %v", err)
+	}
+}
+
+func executeQuery(baseURL string) (heroQuery, error) {
+	var q heroQuery
+
+	// Set up a GraphQL server.
+	schema, err := graphqlserver.ParseSchema(starwars.Schema, &starwars.Resolver{})
+	if err != nil {
+		return q, err
+	}
+	mux := http.NewServeMux()
+	mux.Handle("/query", &relay.Handler{Schema: schema})
+
+	client := graphql.NewClient(fmt.Sprintf("%s/query", baseURL), nil)
+
+	variables := map[string]interface{}{
+		"characterID": graphql.ID("1003"),
+	}
+	err = client.Query(context.Background(), &q, variables)
+	if err != nil {
+		return q, err
+	}
+
+	return q, nil
+}
+
+type hero struct {
+	ID   graphql.ID     `json:"ID"`
+	Name graphql.String `json:"Name"`
+}
+type friend struct {
+	Name     graphql.String `json:"Name"`
+	Typename graphql.String `json:"__typename" graphql:"__typename"`
+}
+type character struct {
+	Name      graphql.String   `json:"Name"`
+	Friends   []friend         `json:"Friends"`
+	AppearsIn []graphql.String `json:"AppearsIn"`
+}
+
+type heroQuery struct {
+	Hero      hero      `json:"Hero"`
+	Character character `json:"character" graphql:"character(id: $characterID)"`
+}
diff --git a/examples/graphql/consumer/pacts/graphqlconsumer-graphqlprovider.json b/examples/graphql/consumer/pacts/graphqlconsumer-graphqlprovider.json
new file mode 100644
index 000000000..09e97def5
--- /dev/null
+++ b/examples/graphql/consumer/pacts/graphqlconsumer-graphqlprovider.json
@@ -0,0 +1,64 @@
+{
+  "consumer": {
+    "name": "GraphQLConsumer"
+  },
+  "provider": {
+    "name": "GraphQLProvider"
+  },
+  "interactions": [
+    {
+      "description": "A request to get foo",
+      "providerState": "User foo exists",
+      "request": {
+        "method": "POST",
+        "path": "/query",
+        "headers": {
+          "content-type": "application/json"
+        },
+        "body": {
+          "query": "query ($characterID:ID!){\n\t\t\t\thero {\n\t\t\t\t\tid,\n\t\t\t\t\tname\n\t\t\t\t},\n\t\t\t\tcharacter(id: $characterID)\n\t\t\t\t{\n\t\t\t\t\tname,\n\t\t\t\t\tfriends{\n\t\t\t\t\t\tname,\n\t\t\t\t\t\t__typename\n\t\t\t\t\t},\n\t\t\t\t\tappearsIn\n\t\t\t\t}\n\t\t\t}",
+          "variables": {
+            "characterID": "1003"
+          }
+        },
+        "matchingRules": {
+          "$.body.query": {
+            "match": "regex",
+            "regex": "query\\s*\\(\\$characterID:ID!\\)\\{\\s*hero\\s*\\{\\s*id,\\s*name\\s*\\},\\s*character\\(id:\\s*\\$characterID\\)\\s*\\{\\s*name,\\s*friends\\{\\s*name,\\s*__typename\\s*\\},\\s*appearsIn\\s*\\}\\s*\\}"
+          }
+        }
+      },
+      "response": {
+        "status": 200,
+        "headers": {
+          "Content-Type": "application/json"
+        },
+        "body": {
+          "data": {
+            "Hero": {
+              "ID": "1003",
+              "Name": "Darth Vader"
+            },
+            "character": {
+              "Name": "Darth Vader",
+              "Friends": [
+                {
+                  "Name": "Wilhuff Tarkin",
+                  "__typename": "friends"
+                }
+              ],
+              "AppearsIn": [
+                "EMPIRE"
+              ]
+            }
+          }
+        }
+      }
+    }
+  ],
+  "metadata": {
+    "pactSpecification": {
+      "version": "2.0.0"
+    }
+  }
+}
\ No newline at end of file
diff --git a/examples/graphql/consumer/pacts/myconsumer-myprovider.json b/examples/graphql/consumer/pacts/myconsumer-myprovider.json
new file mode 100644
index 000000000..d1817e667
--- /dev/null
+++ b/examples/graphql/consumer/pacts/myconsumer-myprovider.json
@@ -0,0 +1,53 @@
+{
+  "consumer": {
+    "name": "MyConsumer"
+  },
+  "provider": {
+    "name": "MyProvider"
+  },
+  "interactions": [
+    {
+      "description": "A request to get foo",
+      "providerState": "User foo exists",
+      "request": {
+        "method": "POST",
+        "path": "/query",
+        "body": {
+          "query": "query ($characterID:ID!){\n\t\t\t\thero {\n\t\t\t\t\tid,\n\t\t\t\t\tname\n\t\t\t\t},\n\t\t\t\tcharacter(id: $characterID)\n\t\t\t\t{\n\t\t\t\t\tname,\n\t\t\t\t\tfriends{\n\t\t\t\t\t\tname,\n\t\t\t\t\t\t__typename\n\t\t\t\t\t},\n\t\t\t\t\tappearsIn\n\t\t\t\t}\n\t\t\t}",
+          "variables": {
+            "characterID": "1003"
+          }
+        },
+
"matchingRules": { + "$.body.query": { + "match": "regex", + "regex": "query\\s*\\(\\$characterID:ID!\\)\\{\\s*hero\\s*\\{\\s*id,\\s*name\\s*\\},\\s*character\\(id:\\s*\\$characterID\\)\\s*\\{\\s*name,\\s*friends\\{\\s*name,\\s*__typename\\s*\\},\\s*appearsIn\\s*\\}\\s*\\}" + } + } + }, + "response": { + "status": 200, + "headers": { + "Content-Type": "application/json" + }, + "body": { + "lastName": "sampson", + "name": "billy" + }, + "matchingRules": { + "$.body.lastName": { + "match": "type" + }, + "$.body.name": { + "match": "type" + } + } + } + } + ], + "metadata": { + "pactSpecification": { + "version": "2.0.0" + } + } +} \ No newline at end of file diff --git a/go.mod b/go.mod index cb46f8b5e..f31fc68c4 100644 --- a/go.mod +++ b/go.mod @@ -5,16 +5,15 @@ go 1.12 require ( github.com/gin-gonic/gin v1.7.2 github.com/golang/protobuf v1.4.2 // indirect + github.com/graph-gophers/graphql-go v1.3.0 github.com/hashicorp/go-version v1.3.0 - github.com/hashicorp/logutils v0.0.0-20150609070431-0dc08b1671f3 - github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/hashicorp/logutils v1.0.0 + github.com/hasura/go-graphql-client v0.6.3 github.com/json-iterator/go v1.1.10 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.1 // indirect - github.com/spf13/cobra v0.0.0-20160604044732-f447048345b6 - github.com/spf13/pflag v0.0.0-20160427162146-cb88ea77998c // indirect + github.com/klauspost/compress v1.14.2 // indirect + github.com/spf13/cobra v1.1.3 github.com/stretchr/testify v1.7.0 - golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae // indirect + golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 // indirect + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect google.golang.org/protobuf v1.25.0 // indirect - gopkg.in/yaml.v2 v2.3.0 // indirect ) diff --git a/go.sum b/go.sum index e69ef5873..3984453ea 100644 --- a/go.sum +++ b/go.sum @@ -1,29 +1,84 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod 
h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= github.com/gin-gonic/gin v1.7.2 h1:Tg03T9yM2xa8j6I3Z3oqLaQRSmKvxPd6g/2HJ6zICFA= github.com/gin-gonic/gin v1.7.2/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= 
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE= github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -32,6 +87,8 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree 
v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -39,78 +96,268 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/graph-gophers/graphql-go v1.2.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= +github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0= +github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod 
h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.3.0 h1:McDWVJIU/y+u1BRV06dPaLfLCaT7fUTJLp5r04x7iNw= github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/logutils v0.0.0-20150609070431-0dc08b1671f3 h1:oD64EFjELI9RY9yoWlfua58r+etdnoIC871z+rr6lkA= -github.com/hashicorp/logutils v0.0.0-20150609070431-0dc08b1671f3/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hasura/go-graphql-client v0.6.3 h1:WO9iSR79LA6y/lEgwCvGcOxGxsNBmIBqV7aE2WaClos= +github.com/hasura/go-graphql-client v0.6.3/go.mod h1:kvaJsDhxGbkIJ1jgebkrnt9EDIELZHpsAMint56v+2I= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.14.2 h1:S0OHlFk/Gbon/yauFJ4FfJJF5V0fc5HbBTJazi28pRw= +github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/leodido/go-urn v1.2.0 
h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod 
h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/spf13/cobra v0.0.0-20160604044732-f447048345b6 h1:YNNSoAqYzMYTgQasAb0qMEXGrmT9Zcat0dgVcmt+oso= -github.com/spf13/cobra v0.0.0-20160604044732-f447048345b6/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/pflag v0.0.0-20160427162146-cb88ea77998c h1:nKSqf1F72lP/EWecUpeCXINWfP8aAa5TXORk21x0xw8= -github.com/spf13/pflag v0.0.0-20160427162146-cb88ea77998c/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod 
h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint 
v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654 h1:id054HUawV2/6IGm2IV8KZQjqtwAOo2CYlOToYqa0d0= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools 
v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -123,13 +370,27 @@ google.golang.org/protobuf v1.23.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= +nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE index c83641619..bc52e96f2 100644 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -2,7 +2,7 @@ ISC License Copyright (c) 2012-2016 Dave Collins -Permission to use, copy, modify, and distribute this software for any +Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. 
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go index 8a4a6589a..792994785 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -16,7 +16,9 @@ // when the code is not running on Google App Engine, compiled by GopherJS, and // "-tags safe" is not added to the go build command line. The "disableunsafe" // tag is deprecated and thus should not be used. -// +build !js,!appengine,!safe,!disableunsafe +// Go versions prior to 1.4 are disabled because they use a different layout +// for interfaces which make the implementation of unsafeReflectValue more complex. +// +build !js,!appengine,!safe,!disableunsafe,go1.4 package spew @@ -34,80 +36,49 @@ const ( ptrSize = unsafe.Sizeof((*byte)(nil)) ) +type flag uintptr + var ( - // offsetPtr, offsetScalar, and offsetFlag are the offsets for the - // internal reflect.Value fields. These values are valid before golang - // commit ecccf07e7f9d which changed the format. The are also valid - // after commit 82f48826c6c7 which changed the format again to mirror - // the original format. Code in the init function updates these offsets - // as necessary. - offsetPtr = uintptr(ptrSize) - offsetScalar = uintptr(0) - offsetFlag = uintptr(ptrSize * 2) - - // flagKindWidth and flagKindShift indicate various bits that the - // reflect package uses internally to track kind information. - // - // flagRO indicates whether or not the value field of a reflect.Value is - // read-only. - // - // flagIndir indicates whether the value field of a reflect.Value is - // the actual data or a pointer to the data. - // - // These values are valid before golang commit 90a7c3c86944 which - // changed their positions. Code in the init function updates these - // flags as necessary. - flagKindWidth = uintptr(5) - flagKindShift = uintptr(flagKindWidth - 1) - flagRO = uintptr(1 << 0) - flagIndir = uintptr(1 << 1) + // flagRO indicates whether the value field of a reflect.Value + // is read-only. + flagRO flag + + // flagAddr indicates whether the address of the reflect.Value's + // value may be taken. + flagAddr flag ) -func init() { - // Older versions of reflect.Value stored small integers directly in the - // ptr field (which is named val in the older versions). Versions - // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named - // scalar for this purpose which unfortunately came before the flag - // field, so the offset of the flag field is different for those - // versions. - // - // This code constructs a new reflect.Value from a known small integer - // and checks if the size of the reflect.Value struct indicates it has - // the scalar field. When it does, the offsets are updated accordingly. - vv := reflect.ValueOf(0xf00) - if unsafe.Sizeof(vv) == (ptrSize * 4) { - offsetScalar = ptrSize * 2 - offsetFlag = ptrSize * 3 - } +// flagKindMask holds the bits that make up the kind +// part of the flags field. In all the supported versions, +// it is in the lower 5 bits. +const flagKindMask = flag(0x1f) - // Commit 90a7c3c86944 changed the flag positions such that the low - // order bits are the kind. This code extracts the kind from the flags - // field and ensures it's the correct type. When it's not, the flag - // order has been changed to the newer format, so the flags are updated - // accordingly. 
- upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) - upfv := *(*uintptr)(upf) - flagKindMask := uintptr((1<>flagKindShift != uintptr(reflect.Int) { - flagKindShift = 0 - flagRO = 1 << 5 - flagIndir = 1 << 6 - - // Commit adf9b30e5594 modified the flags to separate the - // flagRO flag into two bits which specifies whether or not the - // field is embedded. This causes flagIndir to move over a bit - // and means that flagRO is the combination of either of the - // original flagRO bit and the new bit. - // - // This code detects the change by extracting what used to be - // the indirect bit to ensure it's set. When it's not, the flag - // order has been changed to the newer format, so the flags are - // updated accordingly. - if upfv&flagIndir == 0 { - flagRO = 3 << 5 - flagIndir = 1 << 7 - } +// Different versions of Go have used different +// bit layouts for the flags type. This table +// records the known combinations. +var okFlags = []struct { + ro, addr flag +}{{ + // From Go 1.4 to 1.5 + ro: 1 << 5, + addr: 1 << 7, +}, { + // Up to Go tip. + ro: 1<<5 | 1<<6, + addr: 1 << 8, +}} + +var flagValOffset = func() uintptr { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") } + return field.Offset +}() + +// flagField returns a pointer to the flag field of a reflect.Value. +func flagField(v *reflect.Value) *flag { + return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) } // unsafeReflectValue converts the passed reflect.Value into a one that bypasses @@ -119,34 +90,56 @@ func init() { // This allows us to check for implementations of the Stringer and error // interfaces to be used for pretty printing ordinarily unaddressable and // inaccessible values such as unexported struct fields. -func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { - indirects := 1 - vt := v.Type() - upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) - rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) - if rvf&flagIndir != 0 { - vt = reflect.PtrTo(v.Type()) - indirects++ - } else if offsetScalar != 0 { - // The value is in the scalar field when it's not one of the - // reference types. - switch vt.Kind() { - case reflect.Uintptr: - case reflect.Chan: - case reflect.Func: - case reflect.Map: - case reflect.Ptr: - case reflect.UnsafePointer: - default: - upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + - offsetScalar) - } +func unsafeReflectValue(v reflect.Value) reflect.Value { + if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { + return v } + flagFieldPtr := flagField(&v) + *flagFieldPtr &^= flagRO + *flagFieldPtr |= flagAddr + return v +} - pv := reflect.NewAt(vt, upv) - rv = pv - for i := 0; i < indirects; i++ { - rv = rv.Elem() +// Sanity checks against future reflect package changes +// to the type or semantics of the Value.flag field. +func init() { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { + panic("reflect.Value flag field has changed kind") + } + type t0 int + var t struct { + A t0 + // t0 will have flagEmbedRO set. + t0 + // a will have flagStickyRO set + a t0 + } + vA := reflect.ValueOf(t).FieldByName("A") + va := reflect.ValueOf(t).FieldByName("a") + vt0 := reflect.ValueOf(t).FieldByName("t0") + + // Infer flagRO from the difference between the flags + // for the (otherwise identical) fields in t. 
+ flagPublic := *flagField(&vA) + flagWithRO := *flagField(&va) | *flagField(&vt0) + flagRO = flagPublic ^ flagWithRO + + // Infer flagAddr from the difference between a value + // taken from a pointer and not. + vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") + flagNoPtr := *flagField(&vA) + flagPtr := *flagField(&vPtrA) + flagAddr = flagNoPtr ^ flagPtr + + // Check that the inferred flags tally with one of the known versions. + for _, f := range okFlags { + if flagRO == f.ro && flagAddr == f.addr { + return + } } - return rv + panic("reflect.Value read-only flag has changed semantics") } diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go index 1fe3cf3d5..205c28d68 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -16,7 +16,7 @@ // when the code is running on Google App Engine, compiled by GopherJS, or // "-tags safe" is added to the go build command line. The "disableunsafe" // tag is deprecated and thus should not be used. -// +build js appengine safe disableunsafe +// +build js appengine safe disableunsafe !go1.4 package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go index 7c519ff47..1be8ce945 100644 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -180,7 +180,7 @@ func printComplex(w io.Writer, c complex128, floatPrecision int) { w.Write(closeParenBytes) } -// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x' +// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' // prefix to Writer w. func printHexPtr(w io.Writer, p uintptr) { // Null pointer. diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go index df1d582a7..f78d89fc1 100644 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -35,16 +35,16 @@ var ( // cCharRE is a regular expression that matches a cgo char. // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") + cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) // cUnsignedCharRE is a regular expression that matches a cgo unsigned // char. It is used to detect unsigned character arrays to hexdump // them. - cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") + cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) // cUint8tCharRE is a regular expression that matches a cgo uint8_t. // It is used to detect uint8_t arrays to hexdump them. - cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") + cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) ) // dumpState contains information about the state of a dump operation. @@ -143,10 +143,10 @@ func (d *dumpState) dumpPtr(v reflect.Value) { // Display dereferenced value. 
d.w.Write(openParenBytes) switch { - case nilFound == true: + case nilFound: d.w.Write(nilAngleBytes) - case cycleFound == true: + case cycleFound: d.w.Write(circularBytes) default: diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go index c49875bac..b04edb7d7 100644 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -182,10 +182,10 @@ func (f *formatState) formatPtr(v reflect.Value) { // Display dereferenced value. switch { - case nilFound == true: + case nilFound: f.fs.Write(nilAngleBytes) - case cycleFound == true: + case cycleFound: f.fs.Write(circularShortBytes) default: diff --git a/vendor/github.com/gin-contrib/sse/.travis.yml b/vendor/github.com/gin-contrib/sse/.travis.yml index a556ac09e..d0e8fcf99 100644 --- a/vendor/github.com/gin-contrib/sse/.travis.yml +++ b/vendor/github.com/gin-contrib/sse/.travis.yml @@ -1,15 +1,26 @@ language: go sudo: false go: - - 1.6.4 - - 1.7.4 - - tip + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - master git: - depth: 3 + depth: 10 + +matrix: + fast_finish: true + include: + - go: 1.11.x + env: GO111MODULE=on + - go: 1.12.x + env: GO111MODULE=on script: - go test -v -covermode=count -coverprofile=coverage.out after_success: - - bash <(curl -s https://codecov.io/bash) \ No newline at end of file + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/gin-contrib/sse/go.mod b/vendor/github.com/gin-contrib/sse/go.mod new file mode 100644 index 000000000..b9c03f47d --- /dev/null +++ b/vendor/github.com/gin-contrib/sse/go.mod @@ -0,0 +1,5 @@ +module github.com/gin-contrib/sse + +go 1.12 + +require github.com/stretchr/testify v1.3.0 diff --git a/vendor/github.com/gin-contrib/sse/go.sum b/vendor/github.com/gin-contrib/sse/go.sum new file mode 100644 index 000000000..4347755af --- /dev/null +++ b/vendor/github.com/gin-contrib/sse/go.sum @@ -0,0 +1,7 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/gin-gonic/gin/.gitignore b/vendor/github.com/gin-gonic/gin/.gitignore index f3b636dff..bdd50c95c 100644 --- a/vendor/github.com/gin-gonic/gin/.gitignore +++ b/vendor/github.com/gin-gonic/gin/.gitignore @@ -2,3 +2,6 @@ vendor/* !vendor/vendor.json coverage.out count.out +test +profile.out +tmp.out diff --git a/vendor/github.com/gin-gonic/gin/.travis.yml b/vendor/github.com/gin-gonic/gin/.travis.yml index 6532a3342..8ebae7123 100644 --- a/vendor/github.com/gin-gonic/gin/.travis.yml +++ b/vendor/github.com/gin-gonic/gin/.travis.yml @@ -1,21 +1,40 @@ language: go -sudo: false -go: - - 1.6.x - - 1.7.x - - 1.8.x - - master + +matrix: + fast_finish: true + include: + - go: 1.12.x + env: GO111MODULE=on + - go: 1.13.x + - go: 1.13.x + env: + - TESTTAGS=nomsgpack + - go: 1.14.x + - go: 1.14.x + env: + - TESTTAGS=nomsgpack + - go: 1.15.x + - go: 1.15.x + env: + - TESTTAGS=nomsgpack + - go: master git: - depth: 3 + depth: 10 + 
+before_install: + - if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; fi install: - - make install + - if [[ "${GO111MODULE}" = "on" ]]; then go mod download; fi + - if [[ "${GO111MODULE}" = "on" ]]; then export PATH="${GOPATH}/bin:${GOROOT}/bin:${PATH}"; fi + - if [[ "${GO111MODULE}" = "on" ]]; then make tools; fi + +go_import_path: github.com/gin-gonic/gin script: - make vet - make fmt-check - - make embedmd - make misspell-check - make test diff --git a/vendor/github.com/gin-gonic/gin/AUTHORS.md b/vendor/github.com/gin-gonic/gin/AUTHORS.md index 7ab7213d9..c634e6be0 100644 --- a/vendor/github.com/gin-gonic/gin/AUTHORS.md +++ b/vendor/github.com/gin-gonic/gin/AUTHORS.md @@ -1,8 +1,12 @@ List of all the awesome people working to make Gin the best Web Framework in Go. +## gin 1.x series authors + +**Gin Core Team:** Bo-Yi Wu (@appleboy), 田欧 (@thinkerou), Javier Provecho (@javierprovecho) + ## gin 0.x series authors -**Maintainer:** Manu Martinez-Almeida (@manucorporat), Javier Provecho (@javierprovecho) +**Maintainers:** Manu Martinez-Almeida (@manucorporat), Javier Provecho (@javierprovecho) People and companies, who have contributed, in alphabetical order. @@ -152,7 +156,7 @@ People and companies, who have contributed, in alphabetical order. - Fix variadic parameter in the flexible render API - Fix Corrupted plain render - Add Pluggable View Renderer Example - + **@msemenistyi (Mykyta Semenistyi)** - update Readme.md. Add code to String method @@ -186,6 +190,8 @@ People and companies, who have contributed, in alphabetical order. **@rogierlommers (Rogier Lommers)** - Add updated static serve example +**@rw-access (Ross Wolf)** +- Added support to mix exact and param routes **@se77en (Damon Zhao)** - Improve color logging diff --git a/vendor/github.com/gin-gonic/gin/BENCHMARKS.md b/vendor/github.com/gin-gonic/gin/BENCHMARKS.md index 6efe3ca42..c11ee99ae 100644 --- a/vendor/github.com/gin-gonic/gin/BENCHMARKS.md +++ b/vendor/github.com/gin-gonic/gin/BENCHMARKS.md @@ -1,298 +1,666 @@ -**Machine:** intel i7 ivy bridge quad-core. 8GB RAM. 
-**Date:** June 4th, 2015 -[https://github.com/gin-gonic/go-http-routing-benchmark](https://github.com/gin-gonic/go-http-routing-benchmark) +# Benchmark System + +**VM HOST:** Travis +**Machine:** Ubuntu 16.04.6 LTS x64 +**Date:** May 04th, 2020 +**Version:** Gin v1.6.3 +**Go Version:** 1.14.2 linux/amd64 +**Source:** [Go HTTP Router Benchmark](https://github.com/gin-gonic/go-http-routing-benchmark) +**Result:** [See the gist](https://gist.github.com/appleboy/b5f2ecfaf50824ae9c64dcfb9165ae5e) or [Travis result](https://travis-ci.org/github/gin-gonic/go-http-routing-benchmark/jobs/682947061) + +## Static Routes: 157 + +```sh +Gin: 34936 Bytes + +HttpServeMux: 14512 Bytes +Ace: 30680 Bytes +Aero: 34536 Bytes +Bear: 30456 Bytes +Beego: 98456 Bytes +Bone: 40224 Bytes +Chi: 83608 Bytes +Denco: 10216 Bytes +Echo: 80328 Bytes +GocraftWeb: 55288 Bytes +Goji: 29744 Bytes +Gojiv2: 105840 Bytes +GoJsonRest: 137496 Bytes +GoRestful: 816936 Bytes +GorillaMux: 585632 Bytes +GowwwRouter: 24968 Bytes +HttpRouter: 21712 Bytes +HttpTreeMux: 73448 Bytes +Kocha: 115472 Bytes +LARS: 30640 Bytes +Macaron: 38592 Bytes +Martini: 310864 Bytes +Pat: 19696 Bytes +Possum: 89920 Bytes +R2router: 23712 Bytes +Rivet: 24608 Bytes +Tango: 28264 Bytes +TigerTonic: 78768 Bytes +Traffic: 538976 Bytes +Vulcan: 369960 Bytes +``` + +## GithubAPI Routes: 203 + +```sh +Gin: 58512 Bytes + +Ace: 48688 Bytes +Aero: 318568 Bytes +Bear: 84248 Bytes +Beego: 150936 Bytes +Bone: 100976 Bytes +Chi: 95112 Bytes +Denco: 36736 Bytes +Echo: 100296 Bytes +GocraftWeb: 95432 Bytes +Goji: 49680 Bytes +Gojiv2: 104704 Bytes +GoJsonRest: 141976 Bytes +GoRestful: 1241656 Bytes +GorillaMux: 1322784 Bytes +GowwwRouter: 80008 Bytes +HttpRouter: 37144 Bytes +HttpTreeMux: 78800 Bytes +Kocha: 785120 Bytes +LARS: 48600 Bytes +Macaron: 92784 Bytes +Martini: 485264 Bytes +Pat: 21200 Bytes +Possum: 85312 Bytes +R2router: 47104 Bytes +Rivet: 42840 Bytes +Tango: 54840 Bytes +TigerTonic: 95264 Bytes +Traffic: 921744 Bytes +Vulcan: 425992 Bytes +``` + +## GPlusAPI Routes: 13 + +```sh +Gin: 4384 Bytes + +Ace: 3712 Bytes +Aero: 26056 Bytes +Bear: 7112 Bytes +Beego: 10272 Bytes +Bone: 6688 Bytes +Chi: 8024 Bytes +Denco: 3264 Bytes +Echo: 9688 Bytes +GocraftWeb: 7496 Bytes +Goji: 3152 Bytes +Gojiv2: 7376 Bytes +GoJsonRest: 11400 Bytes +GoRestful: 74328 Bytes +GorillaMux: 66208 Bytes +GowwwRouter: 5744 Bytes +HttpRouter: 2808 Bytes +HttpTreeMux: 7440 Bytes +Kocha: 128880 Bytes +LARS: 3656 Bytes +Macaron: 8656 Bytes +Martini: 23920 Bytes +Pat: 1856 Bytes +Possum: 7248 Bytes +R2router: 3928 Bytes +Rivet: 3064 Bytes +Tango: 5168 Bytes +TigerTonic: 9408 Bytes +Traffic: 46400 Bytes +Vulcan: 25544 Bytes ``` -BenchmarkAce_Param 5000000 372 ns/op 32 B/op 1 allocs/op -BenchmarkBear_Param 1000000 1165 ns/op 424 B/op 5 allocs/op -BenchmarkBeego_Param 1000000 2440 ns/op 720 B/op 10 allocs/op -BenchmarkBone_Param 1000000 1067 ns/op 384 B/op 3 allocs/op -BenchmarkDenco_Param 5000000 240 ns/op 32 B/op 1 allocs/op -BenchmarkEcho_Param 10000000 130 ns/op 0 B/op 0 allocs/op -BenchmarkGin_Param 10000000 133 ns/op 0 B/op 0 allocs/op -BenchmarkGocraftWeb_Param 1000000 1826 ns/op 656 B/op 9 allocs/op -BenchmarkGoji_Param 2000000 957 ns/op 336 B/op 2 allocs/op -BenchmarkGoJsonRest_Param 1000000 2021 ns/op 657 B/op 14 allocs/op -BenchmarkGoRestful_Param 200000 8825 ns/op 2496 B/op 31 allocs/op -BenchmarkGorillaMux_Param 500000 3340 ns/op 784 B/op 9 allocs/op -BenchmarkHttpRouter_Param 10000000 152 ns/op 32 B/op 1 allocs/op -BenchmarkHttpTreeMux_Param 2000000 717 ns/op 336 B/op 2 allocs/op 
-BenchmarkKocha_Param 3000000 423 ns/op 56 B/op 3 allocs/op -BenchmarkMacaron_Param 1000000 3410 ns/op 1104 B/op 11 allocs/op -BenchmarkMartini_Param 200000 7101 ns/op 1152 B/op 12 allocs/op -BenchmarkPat_Param 1000000 2040 ns/op 656 B/op 14 allocs/op -BenchmarkPossum_Param 1000000 2048 ns/op 624 B/op 7 allocs/op -BenchmarkR2router_Param 1000000 1144 ns/op 432 B/op 6 allocs/op -BenchmarkRevel_Param 200000 6725 ns/op 1672 B/op 28 allocs/op -BenchmarkRivet_Param 1000000 1121 ns/op 464 B/op 5 allocs/op -BenchmarkTango_Param 1000000 1479 ns/op 256 B/op 10 allocs/op -BenchmarkTigerTonic_Param 1000000 3393 ns/op 992 B/op 19 allocs/op -BenchmarkTraffic_Param 300000 5525 ns/op 1984 B/op 23 allocs/op -BenchmarkVulcan_Param 2000000 924 ns/op 98 B/op 3 allocs/op -BenchmarkZeus_Param 1000000 1084 ns/op 368 B/op 3 allocs/op -BenchmarkAce_Param5 3000000 614 ns/op 160 B/op 1 allocs/op -BenchmarkBear_Param5 1000000 1617 ns/op 469 B/op 5 allocs/op -BenchmarkBeego_Param5 1000000 3373 ns/op 992 B/op 13 allocs/op -BenchmarkBone_Param5 1000000 1478 ns/op 432 B/op 3 allocs/op -BenchmarkDenco_Param5 3000000 570 ns/op 160 B/op 1 allocs/op -BenchmarkEcho_Param5 5000000 256 ns/op 0 B/op 0 allocs/op -BenchmarkGin_Param5 10000000 222 ns/op 0 B/op 0 allocs/op -BenchmarkGocraftWeb_Param5 1000000 2789 ns/op 928 B/op 12 allocs/op -BenchmarkGoji_Param5 1000000 1287 ns/op 336 B/op 2 allocs/op -BenchmarkGoJsonRest_Param5 1000000 3670 ns/op 1105 B/op 17 allocs/op -BenchmarkGoRestful_Param5 200000 10756 ns/op 2672 B/op 31 allocs/op -BenchmarkGorillaMux_Param5 300000 5543 ns/op 912 B/op 9 allocs/op -BenchmarkHttpRouter_Param5 5000000 403 ns/op 160 B/op 1 allocs/op -BenchmarkHttpTreeMux_Param5 1000000 1089 ns/op 336 B/op 2 allocs/op -BenchmarkKocha_Param5 1000000 1682 ns/op 440 B/op 10 allocs/op -BenchmarkMacaron_Param5 300000 4596 ns/op 1376 B/op 14 allocs/op -BenchmarkMartini_Param5 100000 15703 ns/op 1280 B/op 12 allocs/op -BenchmarkPat_Param5 300000 5320 ns/op 1008 B/op 42 allocs/op -BenchmarkPossum_Param5 1000000 2155 ns/op 624 B/op 7 allocs/op -BenchmarkR2router_Param5 1000000 1559 ns/op 432 B/op 6 allocs/op -BenchmarkRevel_Param5 200000 8184 ns/op 2024 B/op 35 allocs/op -BenchmarkRivet_Param5 1000000 1914 ns/op 528 B/op 9 allocs/op -BenchmarkTango_Param5 1000000 3280 ns/op 944 B/op 18 allocs/op -BenchmarkTigerTonic_Param5 200000 11638 ns/op 2519 B/op 53 allocs/op -BenchmarkTraffic_Param5 200000 8941 ns/op 2280 B/op 31 allocs/op -BenchmarkVulcan_Param5 1000000 1279 ns/op 98 B/op 3 allocs/op -BenchmarkZeus_Param5 1000000 1574 ns/op 416 B/op 3 allocs/op -BenchmarkAce_Param20 1000000 1528 ns/op 640 B/op 1 allocs/op -BenchmarkBear_Param20 300000 4906 ns/op 1633 B/op 5 allocs/op -BenchmarkBeego_Param20 200000 10529 ns/op 3868 B/op 17 allocs/op -BenchmarkBone_Param20 300000 7362 ns/op 2539 B/op 5 allocs/op -BenchmarkDenco_Param20 1000000 1884 ns/op 640 B/op 1 allocs/op -BenchmarkEcho_Param20 2000000 689 ns/op 0 B/op 0 allocs/op -BenchmarkGin_Param20 3000000 545 ns/op 0 B/op 0 allocs/op -BenchmarkGocraftWeb_Param20 200000 9437 ns/op 3804 B/op 16 allocs/op -BenchmarkGoji_Param20 500000 3987 ns/op 1246 B/op 2 allocs/op -BenchmarkGoJsonRest_Param20 100000 12799 ns/op 4492 B/op 21 allocs/op -BenchmarkGoRestful_Param20 100000 19451 ns/op 5244 B/op 33 allocs/op -BenchmarkGorillaMux_Param20 100000 12456 ns/op 3275 B/op 11 allocs/op -BenchmarkHttpRouter_Param20 1000000 1333 ns/op 640 B/op 1 allocs/op -BenchmarkHttpTreeMux_Param20 300000 6490 ns/op 2187 B/op 4 allocs/op -BenchmarkKocha_Param20 300000 5335 ns/op 1808 B/op 27 allocs/op 
-BenchmarkMacaron_Param20 200000 11325 ns/op 4252 B/op 18 allocs/op -BenchmarkMartini_Param20 20000 64419 ns/op 3644 B/op 14 allocs/op -BenchmarkPat_Param20 50000 24672 ns/op 4888 B/op 151 allocs/op -BenchmarkPossum_Param20 1000000 2085 ns/op 624 B/op 7 allocs/op -BenchmarkR2router_Param20 300000 6809 ns/op 2283 B/op 8 allocs/op -BenchmarkRevel_Param20 100000 16600 ns/op 5551 B/op 54 allocs/op -BenchmarkRivet_Param20 200000 8428 ns/op 2620 B/op 26 allocs/op -BenchmarkTango_Param20 100000 16302 ns/op 8224 B/op 48 allocs/op -BenchmarkTigerTonic_Param20 30000 46828 ns/op 10538 B/op 178 allocs/op -BenchmarkTraffic_Param20 50000 28871 ns/op 7998 B/op 66 allocs/op -BenchmarkVulcan_Param20 1000000 2267 ns/op 98 B/op 3 allocs/op -BenchmarkZeus_Param20 300000 6828 ns/op 2507 B/op 5 allocs/op -BenchmarkAce_ParamWrite 3000000 502 ns/op 40 B/op 2 allocs/op -BenchmarkBear_ParamWrite 1000000 1303 ns/op 424 B/op 5 allocs/op -BenchmarkBeego_ParamWrite 1000000 2489 ns/op 728 B/op 11 allocs/op -BenchmarkBone_ParamWrite 1000000 1181 ns/op 384 B/op 3 allocs/op -BenchmarkDenco_ParamWrite 5000000 315 ns/op 32 B/op 1 allocs/op -BenchmarkEcho_ParamWrite 10000000 237 ns/op 8 B/op 1 allocs/op -BenchmarkGin_ParamWrite 5000000 336 ns/op 0 B/op 0 allocs/op -BenchmarkGocraftWeb_ParamWrite 1000000 2079 ns/op 664 B/op 10 allocs/op -BenchmarkGoji_ParamWrite 1000000 1092 ns/op 336 B/op 2 allocs/op -BenchmarkGoJsonRest_ParamWrite 1000000 3329 ns/op 1136 B/op 19 allocs/op -BenchmarkGoRestful_ParamWrite 200000 9273 ns/op 2504 B/op 32 allocs/op -BenchmarkGorillaMux_ParamWrite 500000 3919 ns/op 792 B/op 10 allocs/op -BenchmarkHttpRouter_ParamWrite 10000000 223 ns/op 32 B/op 1 allocs/op -BenchmarkHttpTreeMux_ParamWrite 2000000 788 ns/op 336 B/op 2 allocs/op -BenchmarkKocha_ParamWrite 3000000 549 ns/op 56 B/op 3 allocs/op -BenchmarkMacaron_ParamWrite 500000 4558 ns/op 1216 B/op 16 allocs/op -BenchmarkMartini_ParamWrite 200000 8850 ns/op 1256 B/op 16 allocs/op -BenchmarkPat_ParamWrite 500000 3679 ns/op 1088 B/op 19 allocs/op -BenchmarkPossum_ParamWrite 1000000 2114 ns/op 624 B/op 7 allocs/op -BenchmarkR2router_ParamWrite 1000000 1320 ns/op 432 B/op 6 allocs/op -BenchmarkRevel_ParamWrite 200000 8048 ns/op 2128 B/op 33 allocs/op -BenchmarkRivet_ParamWrite 1000000 1393 ns/op 472 B/op 6 allocs/op -BenchmarkTango_ParamWrite 2000000 819 ns/op 136 B/op 5 allocs/op -BenchmarkTigerTonic_ParamWrite 300000 5860 ns/op 1440 B/op 25 allocs/op -BenchmarkTraffic_ParamWrite 200000 7429 ns/op 2400 B/op 27 allocs/op -BenchmarkVulcan_ParamWrite 2000000 972 ns/op 98 B/op 3 allocs/op -BenchmarkZeus_ParamWrite 1000000 1226 ns/op 368 B/op 3 allocs/op -BenchmarkAce_GithubStatic 5000000 294 ns/op 0 B/op 0 allocs/op -BenchmarkBear_GithubStatic 3000000 575 ns/op 88 B/op 3 allocs/op -BenchmarkBeego_GithubStatic 1000000 1561 ns/op 368 B/op 7 allocs/op -BenchmarkBone_GithubStatic 200000 12301 ns/op 2880 B/op 60 allocs/op -BenchmarkDenco_GithubStatic 20000000 74.6 ns/op 0 B/op 0 allocs/op -BenchmarkEcho_GithubStatic 10000000 176 ns/op 0 B/op 0 allocs/op -BenchmarkGin_GithubStatic 10000000 159 ns/op 0 B/op 0 allocs/op -BenchmarkGocraftWeb_GithubStatic 1000000 1116 ns/op 304 B/op 6 allocs/op -BenchmarkGoji_GithubStatic 5000000 413 ns/op 0 B/op 0 allocs/op -BenchmarkGoRestful_GithubStatic 30000 55200 ns/op 3520 B/op 36 allocs/op -BenchmarkGoJsonRest_GithubStatic 1000000 1504 ns/op 337 B/op 12 allocs/op -BenchmarkGorillaMux_GithubStatic 100000 23620 ns/op 464 B/op 8 allocs/op -BenchmarkHttpRouter_GithubStatic 20000000 78.3 ns/op 0 B/op 0 allocs/op 
-BenchmarkHttpTreeMux_GithubStatic 20000000 84.9 ns/op 0 B/op 0 allocs/op -BenchmarkKocha_GithubStatic 20000000 111 ns/op 0 B/op 0 allocs/op -BenchmarkMacaron_GithubStatic 1000000 2686 ns/op 752 B/op 8 allocs/op -BenchmarkMartini_GithubStatic 100000 22244 ns/op 832 B/op 11 allocs/op -BenchmarkPat_GithubStatic 100000 13278 ns/op 3648 B/op 76 allocs/op -BenchmarkPossum_GithubStatic 1000000 1429 ns/op 480 B/op 4 allocs/op -BenchmarkR2router_GithubStatic 2000000 726 ns/op 144 B/op 5 allocs/op -BenchmarkRevel_GithubStatic 300000 6271 ns/op 1288 B/op 25 allocs/op -BenchmarkRivet_GithubStatic 3000000 474 ns/op 112 B/op 2 allocs/op -BenchmarkTango_GithubStatic 1000000 1842 ns/op 256 B/op 10 allocs/op -BenchmarkTigerTonic_GithubStatic 5000000 361 ns/op 48 B/op 1 allocs/op -BenchmarkTraffic_GithubStatic 30000 47197 ns/op 18920 B/op 149 allocs/op -BenchmarkVulcan_GithubStatic 1000000 1415 ns/op 98 B/op 3 allocs/op -BenchmarkZeus_GithubStatic 1000000 2522 ns/op 512 B/op 11 allocs/op -BenchmarkAce_GithubParam 3000000 578 ns/op 96 B/op 1 allocs/op -BenchmarkBear_GithubParam 1000000 1592 ns/op 464 B/op 5 allocs/op -BenchmarkBeego_GithubParam 1000000 2891 ns/op 784 B/op 11 allocs/op -BenchmarkBone_GithubParam 300000 6440 ns/op 1456 B/op 16 allocs/op -BenchmarkDenco_GithubParam 3000000 514 ns/op 128 B/op 1 allocs/op -BenchmarkEcho_GithubParam 5000000 292 ns/op 0 B/op 0 allocs/op -BenchmarkGin_GithubParam 10000000 242 ns/op 0 B/op 0 allocs/op -BenchmarkGocraftWeb_GithubParam 1000000 2343 ns/op 720 B/op 10 allocs/op -BenchmarkGoji_GithubParam 1000000 1566 ns/op 336 B/op 2 allocs/op -BenchmarkGoJsonRest_GithubParam 1000000 2828 ns/op 721 B/op 15 allocs/op -BenchmarkGoRestful_GithubParam 10000 177711 ns/op 2816 B/op 35 allocs/op -BenchmarkGorillaMux_GithubParam 100000 13591 ns/op 816 B/op 9 allocs/op -BenchmarkHttpRouter_GithubParam 5000000 352 ns/op 96 B/op 1 allocs/op -BenchmarkHttpTreeMux_GithubParam 2000000 973 ns/op 336 B/op 2 allocs/op -BenchmarkKocha_GithubParam 2000000 889 ns/op 128 B/op 5 allocs/op -BenchmarkMacaron_GithubParam 500000 4047 ns/op 1168 B/op 12 allocs/op -BenchmarkMartini_GithubParam 50000 28982 ns/op 1184 B/op 12 allocs/op -BenchmarkPat_GithubParam 200000 8747 ns/op 2480 B/op 56 allocs/op -BenchmarkPossum_GithubParam 1000000 2158 ns/op 624 B/op 7 allocs/op -BenchmarkR2router_GithubParam 1000000 1352 ns/op 432 B/op 6 allocs/op -BenchmarkRevel_GithubParam 200000 7673 ns/op 1784 B/op 30 allocs/op -BenchmarkRivet_GithubParam 1000000 1573 ns/op 480 B/op 6 allocs/op -BenchmarkTango_GithubParam 1000000 2418 ns/op 480 B/op 13 allocs/op -BenchmarkTigerTonic_GithubParam 300000 6048 ns/op 1440 B/op 28 allocs/op -BenchmarkTraffic_GithubParam 100000 20143 ns/op 6024 B/op 55 allocs/op -BenchmarkVulcan_GithubParam 1000000 2224 ns/op 98 B/op 3 allocs/op -BenchmarkZeus_GithubParam 500000 4156 ns/op 1312 B/op 12 allocs/op -BenchmarkAce_GithubAll 10000 109482 ns/op 13792 B/op 167 allocs/op -BenchmarkBear_GithubAll 10000 287490 ns/op 79952 B/op 943 allocs/op -BenchmarkBeego_GithubAll 3000 562184 ns/op 146272 B/op 2092 allocs/op -BenchmarkBone_GithubAll 500 2578716 ns/op 648016 B/op 8119 allocs/op -BenchmarkDenco_GithubAll 20000 94955 ns/op 20224 B/op 167 allocs/op -BenchmarkEcho_GithubAll 30000 58705 ns/op 0 B/op 0 allocs/op -BenchmarkGin_GithubAll 30000 50991 ns/op 0 B/op 0 allocs/op -BenchmarkGocraftWeb_GithubAll 5000 449648 ns/op 133280 B/op 1889 allocs/op -BenchmarkGoji_GithubAll 2000 689748 ns/op 56113 B/op 334 allocs/op -BenchmarkGoJsonRest_GithubAll 5000 537769 ns/op 135995 B/op 2940 allocs/op 
-BenchmarkGoRestful_GithubAll 100 18410628 ns/op 797236 B/op 7725 allocs/op -BenchmarkGorillaMux_GithubAll 200 8036360 ns/op 153137 B/op 1791 allocs/op -BenchmarkHttpRouter_GithubAll 20000 63506 ns/op 13792 B/op 167 allocs/op -BenchmarkHttpTreeMux_GithubAll 10000 165927 ns/op 56112 B/op 334 allocs/op -BenchmarkKocha_GithubAll 10000 171362 ns/op 23304 B/op 843 allocs/op -BenchmarkMacaron_GithubAll 2000 817008 ns/op 224960 B/op 2315 allocs/op -BenchmarkMartini_GithubAll 100 12609209 ns/op 237952 B/op 2686 allocs/op -BenchmarkPat_GithubAll 300 4830398 ns/op 1504101 B/op 32222 allocs/op -BenchmarkPossum_GithubAll 10000 301716 ns/op 97440 B/op 812 allocs/op -BenchmarkR2router_GithubAll 10000 270691 ns/op 77328 B/op 1182 allocs/op -BenchmarkRevel_GithubAll 1000 1491919 ns/op 345553 B/op 5918 allocs/op -BenchmarkRivet_GithubAll 10000 283860 ns/op 84272 B/op 1079 allocs/op -BenchmarkTango_GithubAll 5000 473821 ns/op 87078 B/op 2470 allocs/op -BenchmarkTigerTonic_GithubAll 2000 1120131 ns/op 241088 B/op 6052 allocs/op -BenchmarkTraffic_GithubAll 200 8708979 ns/op 2664762 B/op 22390 allocs/op -BenchmarkVulcan_GithubAll 5000 353392 ns/op 19894 B/op 609 allocs/op -BenchmarkZeus_GithubAll 2000 944234 ns/op 300688 B/op 2648 allocs/op -BenchmarkAce_GPlusStatic 5000000 251 ns/op 0 B/op 0 allocs/op -BenchmarkBear_GPlusStatic 3000000 415 ns/op 72 B/op 3 allocs/op -BenchmarkBeego_GPlusStatic 1000000 1416 ns/op 352 B/op 7 allocs/op -BenchmarkBone_GPlusStatic 10000000 192 ns/op 32 B/op 1 allocs/op -BenchmarkDenco_GPlusStatic 30000000 47.6 ns/op 0 B/op 0 allocs/op -BenchmarkEcho_GPlusStatic 10000000 131 ns/op 0 B/op 0 allocs/op -BenchmarkGin_GPlusStatic 10000000 131 ns/op 0 B/op 0 allocs/op -BenchmarkGocraftWeb_GPlusStatic 1000000 1035 ns/op 288 B/op 6 allocs/op -BenchmarkGoji_GPlusStatic 5000000 304 ns/op 0 B/op 0 allocs/op -BenchmarkGoJsonRest_GPlusStatic 1000000 1286 ns/op 337 B/op 12 allocs/op -BenchmarkGoRestful_GPlusStatic 200000 9649 ns/op 2160 B/op 30 allocs/op -BenchmarkGorillaMux_GPlusStatic 1000000 2346 ns/op 464 B/op 8 allocs/op -BenchmarkHttpRouter_GPlusStatic 30000000 42.7 ns/op 0 B/op 0 allocs/op -BenchmarkHttpTreeMux_GPlusStatic 30000000 49.5 ns/op 0 B/op 0 allocs/op -BenchmarkKocha_GPlusStatic 20000000 74.8 ns/op 0 B/op 0 allocs/op -BenchmarkMacaron_GPlusStatic 1000000 2520 ns/op 736 B/op 8 allocs/op -BenchmarkMartini_GPlusStatic 300000 5310 ns/op 832 B/op 11 allocs/op -BenchmarkPat_GPlusStatic 5000000 398 ns/op 96 B/op 2 allocs/op -BenchmarkPossum_GPlusStatic 1000000 1434 ns/op 480 B/op 4 allocs/op -BenchmarkR2router_GPlusStatic 2000000 646 ns/op 144 B/op 5 allocs/op -BenchmarkRevel_GPlusStatic 300000 6172 ns/op 1272 B/op 25 allocs/op -BenchmarkRivet_GPlusStatic 3000000 444 ns/op 112 B/op 2 allocs/op -BenchmarkTango_GPlusStatic 1000000 1400 ns/op 208 B/op 10 allocs/op -BenchmarkTigerTonic_GPlusStatic 10000000 213 ns/op 32 B/op 1 allocs/op -BenchmarkTraffic_GPlusStatic 1000000 3091 ns/op 1208 B/op 16 allocs/op -BenchmarkVulcan_GPlusStatic 2000000 863 ns/op 98 B/op 3 allocs/op -BenchmarkZeus_GPlusStatic 10000000 237 ns/op 16 B/op 1 allocs/op -BenchmarkAce_GPlusParam 3000000 435 ns/op 64 B/op 1 allocs/op -BenchmarkBear_GPlusParam 1000000 1205 ns/op 448 B/op 5 allocs/op -BenchmarkBeego_GPlusParam 1000000 2494 ns/op 720 B/op 10 allocs/op -BenchmarkBone_GPlusParam 1000000 1126 ns/op 384 B/op 3 allocs/op -BenchmarkDenco_GPlusParam 5000000 325 ns/op 64 B/op 1 allocs/op -BenchmarkEcho_GPlusParam 10000000 168 ns/op 0 B/op 0 allocs/op -BenchmarkGin_GPlusParam 10000000 170 ns/op 0 B/op 0 allocs/op 
-BenchmarkGocraftWeb_GPlusParam 1000000 1895 ns/op 656 B/op 9 allocs/op -BenchmarkGoji_GPlusParam 1000000 1071 ns/op 336 B/op 2 allocs/op -BenchmarkGoJsonRest_GPlusParam 1000000 2282 ns/op 657 B/op 14 allocs/op -BenchmarkGoRestful_GPlusParam 100000 19400 ns/op 2560 B/op 33 allocs/op -BenchmarkGorillaMux_GPlusParam 500000 5001 ns/op 784 B/op 9 allocs/op -BenchmarkHttpRouter_GPlusParam 10000000 240 ns/op 64 B/op 1 allocs/op -BenchmarkHttpTreeMux_GPlusParam 2000000 797 ns/op 336 B/op 2 allocs/op -BenchmarkKocha_GPlusParam 3000000 505 ns/op 56 B/op 3 allocs/op -BenchmarkMacaron_GPlusParam 1000000 3668 ns/op 1104 B/op 11 allocs/op -BenchmarkMartini_GPlusParam 200000 10672 ns/op 1152 B/op 12 allocs/op -BenchmarkPat_GPlusParam 1000000 2376 ns/op 704 B/op 14 allocs/op -BenchmarkPossum_GPlusParam 1000000 2090 ns/op 624 B/op 7 allocs/op -BenchmarkR2router_GPlusParam 1000000 1233 ns/op 432 B/op 6 allocs/op -BenchmarkRevel_GPlusParam 200000 6778 ns/op 1704 B/op 28 allocs/op -BenchmarkRivet_GPlusParam 1000000 1279 ns/op 464 B/op 5 allocs/op -BenchmarkTango_GPlusParam 1000000 1981 ns/op 272 B/op 10 allocs/op -BenchmarkTigerTonic_GPlusParam 500000 3893 ns/op 1064 B/op 19 allocs/op -BenchmarkTraffic_GPlusParam 200000 6585 ns/op 2000 B/op 23 allocs/op -BenchmarkVulcan_GPlusParam 1000000 1233 ns/op 98 B/op 3 allocs/op -BenchmarkZeus_GPlusParam 1000000 1350 ns/op 368 B/op 3 allocs/op -BenchmarkAce_GPlus2Params 3000000 512 ns/op 64 B/op 1 allocs/op -BenchmarkBear_GPlus2Params 1000000 1564 ns/op 464 B/op 5 allocs/op -BenchmarkBeego_GPlus2Params 1000000 3043 ns/op 784 B/op 11 allocs/op -BenchmarkBone_GPlus2Params 1000000 3152 ns/op 736 B/op 7 allocs/op -BenchmarkDenco_GPlus2Params 3000000 431 ns/op 64 B/op 1 allocs/op -BenchmarkEcho_GPlus2Params 5000000 247 ns/op 0 B/op 0 allocs/op -BenchmarkGin_GPlus2Params 10000000 219 ns/op 0 B/op 0 allocs/op -BenchmarkGocraftWeb_GPlus2Params 1000000 2363 ns/op 720 B/op 10 allocs/op -BenchmarkGoji_GPlus2Params 1000000 1540 ns/op 336 B/op 2 allocs/op -BenchmarkGoJsonRest_GPlus2Params 1000000 2872 ns/op 721 B/op 15 allocs/op -BenchmarkGoRestful_GPlus2Params 100000 23030 ns/op 2720 B/op 35 allocs/op -BenchmarkGorillaMux_GPlus2Params 200000 10516 ns/op 816 B/op 9 allocs/op -BenchmarkHttpRouter_GPlus2Params 5000000 273 ns/op 64 B/op 1 allocs/op -BenchmarkHttpTreeMux_GPlus2Params 2000000 939 ns/op 336 B/op 2 allocs/op -BenchmarkKocha_GPlus2Params 2000000 844 ns/op 128 B/op 5 allocs/op -BenchmarkMacaron_GPlus2Params 500000 3914 ns/op 1168 B/op 12 allocs/op -BenchmarkMartini_GPlus2Params 50000 35759 ns/op 1280 B/op 16 allocs/op -BenchmarkPat_GPlus2Params 200000 7089 ns/op 2304 B/op 41 allocs/op -BenchmarkPossum_GPlus2Params 1000000 2093 ns/op 624 B/op 7 allocs/op -BenchmarkR2router_GPlus2Params 1000000 1320 ns/op 432 B/op 6 allocs/op -BenchmarkRevel_GPlus2Params 200000 7351 ns/op 1800 B/op 30 allocs/op -BenchmarkRivet_GPlus2Params 1000000 1485 ns/op 480 B/op 6 allocs/op -BenchmarkTango_GPlus2Params 1000000 2111 ns/op 448 B/op 12 allocs/op -BenchmarkTigerTonic_GPlus2Params 300000 6271 ns/op 1528 B/op 28 allocs/op -BenchmarkTraffic_GPlus2Params 100000 14886 ns/op 3312 B/op 34 allocs/op -BenchmarkVulcan_GPlus2Params 1000000 1883 ns/op 98 B/op 3 allocs/op -BenchmarkZeus_GPlus2Params 1000000 2686 ns/op 784 B/op 6 allocs/op -BenchmarkAce_GPlusAll 300000 5912 ns/op 640 B/op 11 allocs/op -BenchmarkBear_GPlusAll 100000 16448 ns/op 5072 B/op 61 allocs/op -BenchmarkBeego_GPlusAll 50000 32916 ns/op 8976 B/op 129 allocs/op -BenchmarkBone_GPlusAll 50000 25836 ns/op 6992 B/op 76 allocs/op 
-BenchmarkDenco_GPlusAll 500000 4462 ns/op 672 B/op 11 allocs/op -BenchmarkEcho_GPlusAll 500000 2806 ns/op 0 B/op 0 allocs/op -BenchmarkGin_GPlusAll 500000 2579 ns/op 0 B/op 0 allocs/op -BenchmarkGocraftWeb_GPlusAll 50000 25223 ns/op 8144 B/op 116 allocs/op -BenchmarkGoji_GPlusAll 100000 14237 ns/op 3696 B/op 22 allocs/op -BenchmarkGoJsonRest_GPlusAll 50000 29227 ns/op 8221 B/op 183 allocs/op -BenchmarkGoRestful_GPlusAll 10000 203144 ns/op 36064 B/op 441 allocs/op -BenchmarkGorillaMux_GPlusAll 20000 80906 ns/op 9712 B/op 115 allocs/op -BenchmarkHttpRouter_GPlusAll 500000 3040 ns/op 640 B/op 11 allocs/op -BenchmarkHttpTreeMux_GPlusAll 200000 9627 ns/op 3696 B/op 22 allocs/op -BenchmarkKocha_GPlusAll 200000 8108 ns/op 976 B/op 43 allocs/op -BenchmarkMacaron_GPlusAll 30000 48083 ns/op 13968 B/op 142 allocs/op -BenchmarkMartini_GPlusAll 10000 196978 ns/op 15072 B/op 178 allocs/op -BenchmarkPat_GPlusAll 30000 58865 ns/op 16880 B/op 343 allocs/op -BenchmarkPossum_GPlusAll 100000 19685 ns/op 6240 B/op 52 allocs/op -BenchmarkR2router_GPlusAll 100000 16251 ns/op 5040 B/op 76 allocs/op -BenchmarkRevel_GPlusAll 20000 93489 ns/op 21656 B/op 368 allocs/op -BenchmarkRivet_GPlusAll 100000 16907 ns/op 5408 B/op 64 allocs/op + +## ParseAPI Routes: 26 + +```sh +Gin: 7776 Bytes + +Ace: 6704 Bytes +Aero: 28488 Bytes +Bear: 12320 Bytes +Beego: 19280 Bytes +Bone: 11440 Bytes +Chi: 9744 Bytes +Denco: 4192 Bytes +Echo: 11664 Bytes +GocraftWeb: 12800 Bytes +Goji: 5680 Bytes +Gojiv2: 14464 Bytes +GoJsonRest: 14072 Bytes +GoRestful: 116264 Bytes +GorillaMux: 105880 Bytes +GowwwRouter: 9344 Bytes +HttpRouter: 5072 Bytes +HttpTreeMux: 7848 Bytes +Kocha: 181712 Bytes +LARS: 6632 Bytes +Macaron: 13648 Bytes +Martini: 45888 Bytes +Pat: 2560 Bytes +Possum: 9200 Bytes +R2router: 7056 Bytes +Rivet: 5680 Bytes +Tango: 8920 Bytes +TigerTonic: 9840 Bytes +Traffic: 79096 Bytes +Vulcan: 44504 Bytes +``` + +## Static Routes + +```sh +BenchmarkGin_StaticAll 62169 19319 ns/op 0 B/op 0 allocs/op + +BenchmarkAce_StaticAll 65428 18313 ns/op 0 B/op 0 allocs/op +BenchmarkAero_StaticAll 121132 9632 ns/op 0 B/op 0 allocs/op +BenchmarkHttpServeMux_StaticAll 52626 22758 ns/op 0 B/op 0 allocs/op +BenchmarkBeego_StaticAll 9962 179058 ns/op 55264 B/op 471 allocs/op +BenchmarkBear_StaticAll 14894 80966 ns/op 20272 B/op 469 allocs/op +BenchmarkBone_StaticAll 18718 64065 ns/op 0 B/op 0 allocs/op +BenchmarkChi_StaticAll 10000 149827 ns/op 67824 B/op 471 allocs/op +BenchmarkDenco_StaticAll 211393 5680 ns/op 0 B/op 0 allocs/op +BenchmarkEcho_StaticAll 49341 24343 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_StaticAll 10000 126209 ns/op 46312 B/op 785 allocs/op +BenchmarkGoji_StaticAll 27956 43174 ns/op 0 B/op 0 allocs/op +BenchmarkGojiv2_StaticAll 3430 370718 ns/op 205984 B/op 1570 allocs/op +BenchmarkGoJsonRest_StaticAll 9134 188888 ns/op 51653 B/op 1727 allocs/op +BenchmarkGoRestful_StaticAll 706 1703330 ns/op 613280 B/op 2053 allocs/op +BenchmarkGorillaMux_StaticAll 1268 924083 ns/op 153233 B/op 1413 allocs/op +BenchmarkGowwwRouter_StaticAll 63374 18935 ns/op 0 B/op 0 allocs/op +BenchmarkHttpRouter_StaticAll 109938 10902 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_StaticAll 109166 10861 ns/op 0 B/op 0 allocs/op +BenchmarkKocha_StaticAll 92258 12992 ns/op 0 B/op 0 allocs/op +BenchmarkLARS_StaticAll 65200 18387 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_StaticAll 5671 291501 ns/op 115553 B/op 1256 allocs/op +BenchmarkMartini_StaticAll 807 1460498 ns/op 125444 B/op 1717 allocs/op +BenchmarkPat_StaticAll 513 2342396 ns/op 602832 B/op 12559 
allocs/op +BenchmarkPossum_StaticAll 10000 128270 ns/op 65312 B/op 471 allocs/op +BenchmarkR2router_StaticAll 16726 71760 ns/op 22608 B/op 628 allocs/op +BenchmarkRivet_StaticAll 41722 28723 ns/op 0 B/op 0 allocs/op +BenchmarkTango_StaticAll 7606 205082 ns/op 39209 B/op 1256 allocs/op +BenchmarkTigerTonic_StaticAll 26247 45806 ns/op 7376 B/op 157 allocs/op +BenchmarkTraffic_StaticAll 550 2284518 ns/op 754864 B/op 14601 allocs/op +BenchmarkVulcan_StaticAll 10000 131343 ns/op 15386 B/op 471 allocs/op +``` + +## Micro Benchmarks + +```sh +BenchmarkGin_Param 18785022 63.9 ns/op 0 B/op 0 allocs/op + +BenchmarkAce_Param 14689765 81.5 ns/op 0 B/op 0 allocs/op +BenchmarkAero_Param 23094770 51.2 ns/op 0 B/op 0 allocs/op +BenchmarkBear_Param 1417045 845 ns/op 456 B/op 5 allocs/op +BenchmarkBeego_Param 1000000 1080 ns/op 352 B/op 3 allocs/op +BenchmarkBone_Param 1000000 1463 ns/op 816 B/op 6 allocs/op +BenchmarkChi_Param 1378756 885 ns/op 432 B/op 3 allocs/op +BenchmarkDenco_Param 8557899 143 ns/op 32 B/op 1 allocs/op +BenchmarkEcho_Param 16433347 75.5 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_Param 1000000 1218 ns/op 648 B/op 8 allocs/op +BenchmarkGoji_Param 1921248 617 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_Param 561848 2156 ns/op 1328 B/op 11 allocs/op +BenchmarkGoJsonRest_Param 1000000 1358 ns/op 649 B/op 13 allocs/op +BenchmarkGoRestful_Param 224857 5307 ns/op 4192 B/op 14 allocs/op +BenchmarkGorillaMux_Param 498313 2459 ns/op 1280 B/op 10 allocs/op +BenchmarkGowwwRouter_Param 1864354 654 ns/op 432 B/op 3 allocs/op +BenchmarkHttpRouter_Param 26269074 47.7 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_Param 2109829 557 ns/op 352 B/op 3 allocs/op +BenchmarkKocha_Param 5050216 243 ns/op 56 B/op 3 allocs/op +BenchmarkLARS_Param 19811712 59.9 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_Param 662746 2329 ns/op 1072 B/op 10 allocs/op +BenchmarkMartini_Param 279902 4260 ns/op 1072 B/op 10 allocs/op +BenchmarkPat_Param 1000000 1382 ns/op 536 B/op 11 allocs/op +BenchmarkPossum_Param 1000000 1014 ns/op 496 B/op 5 allocs/op +BenchmarkR2router_Param 1712559 707 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_Param 6648086 182 ns/op 48 B/op 1 allocs/op +BenchmarkTango_Param 1221504 994 ns/op 248 B/op 8 allocs/op +BenchmarkTigerTonic_Param 891661 2261 ns/op 776 B/op 16 allocs/op +BenchmarkTraffic_Param 350059 3598 ns/op 1856 B/op 21 allocs/op +BenchmarkVulcan_Param 2517823 472 ns/op 98 B/op 3 allocs/op +BenchmarkAce_Param5 9214365 130 ns/op 0 B/op 0 allocs/op +BenchmarkAero_Param5 15369013 77.9 ns/op 0 B/op 0 allocs/op +BenchmarkBear_Param5 1000000 1113 ns/op 501 B/op 5 allocs/op +BenchmarkBeego_Param5 1000000 1269 ns/op 352 B/op 3 allocs/op +BenchmarkBone_Param5 986820 1873 ns/op 864 B/op 6 allocs/op +BenchmarkChi_Param5 1000000 1156 ns/op 432 B/op 3 allocs/op +BenchmarkDenco_Param5 3036331 400 ns/op 160 B/op 1 allocs/op +BenchmarkEcho_Param5 6447133 186 ns/op 0 B/op 0 allocs/op +BenchmarkGin_Param5 10786068 110 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_Param5 844820 1944 ns/op 920 B/op 11 allocs/op +BenchmarkGoji_Param5 1474965 827 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_Param5 442820 2516 ns/op 1392 B/op 11 allocs/op +BenchmarkGoJsonRest_Param5 507555 2711 ns/op 1097 B/op 16 allocs/op +BenchmarkGoRestful_Param5 216481 6093 ns/op 4288 B/op 14 allocs/op +BenchmarkGorillaMux_Param5 314402 3628 ns/op 1344 B/op 10 allocs/op +BenchmarkGowwwRouter_Param5 1624660 733 ns/op 432 B/op 3 allocs/op +BenchmarkHttpRouter_Param5 13167324 92.0 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_Param5 1000000 1295 
ns/op 576 B/op 6 allocs/op +BenchmarkKocha_Param5 1000000 1138 ns/op 440 B/op 10 allocs/op +BenchmarkLARS_Param5 11580613 105 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_Param5 473596 2755 ns/op 1072 B/op 10 allocs/op +BenchmarkMartini_Param5 230756 5111 ns/op 1232 B/op 11 allocs/op +BenchmarkPat_Param5 469190 3370 ns/op 888 B/op 29 allocs/op +BenchmarkPossum_Param5 1000000 1002 ns/op 496 B/op 5 allocs/op +BenchmarkR2router_Param5 1422129 844 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_Param5 2263789 539 ns/op 240 B/op 1 allocs/op +BenchmarkTango_Param5 1000000 1256 ns/op 360 B/op 8 allocs/op +BenchmarkTigerTonic_Param5 175500 7492 ns/op 2279 B/op 39 allocs/op +BenchmarkTraffic_Param5 233631 5816 ns/op 2208 B/op 27 allocs/op +BenchmarkVulcan_Param5 1923416 629 ns/op 98 B/op 3 allocs/op +BenchmarkAce_Param20 4321266 281 ns/op 0 B/op 0 allocs/op +BenchmarkAero_Param20 31501641 35.2 ns/op 0 B/op 0 allocs/op +BenchmarkBear_Param20 335204 3489 ns/op 1665 B/op 5 allocs/op +BenchmarkBeego_Param20 503674 2860 ns/op 352 B/op 3 allocs/op +BenchmarkBone_Param20 298922 4741 ns/op 2031 B/op 6 allocs/op +BenchmarkChi_Param20 878181 1957 ns/op 432 B/op 3 allocs/op +BenchmarkDenco_Param20 1000000 1360 ns/op 640 B/op 1 allocs/op +BenchmarkEcho_Param20 2104946 580 ns/op 0 B/op 0 allocs/op +BenchmarkGin_Param20 4167204 290 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_Param20 173064 7514 ns/op 3796 B/op 15 allocs/op +BenchmarkGoji_Param20 458778 2651 ns/op 1247 B/op 2 allocs/op +BenchmarkGojiv2_Param20 364862 3178 ns/op 1632 B/op 11 allocs/op +BenchmarkGoJsonRest_Param20 125514 9760 ns/op 4485 B/op 20 allocs/op +BenchmarkGoRestful_Param20 101217 11964 ns/op 6715 B/op 18 allocs/op +BenchmarkGorillaMux_Param20 147654 8132 ns/op 3452 B/op 12 allocs/op +BenchmarkGowwwRouter_Param20 1000000 1225 ns/op 432 B/op 3 allocs/op +BenchmarkHttpRouter_Param20 4920895 247 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_Param20 173202 6605 ns/op 3196 B/op 10 allocs/op +BenchmarkKocha_Param20 345988 3620 ns/op 1808 B/op 27 allocs/op +BenchmarkLARS_Param20 4592326 262 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_Param20 166492 7286 ns/op 2924 B/op 12 allocs/op +BenchmarkMartini_Param20 122162 10653 ns/op 3595 B/op 13 allocs/op +BenchmarkPat_Param20 78630 15239 ns/op 4424 B/op 93 allocs/op +BenchmarkPossum_Param20 1000000 1008 ns/op 496 B/op 5 allocs/op +BenchmarkR2router_Param20 294981 4587 ns/op 2284 B/op 7 allocs/op +BenchmarkRivet_Param20 691798 2090 ns/op 1024 B/op 1 allocs/op +BenchmarkTango_Param20 842440 2505 ns/op 856 B/op 8 allocs/op +BenchmarkTigerTonic_Param20 38614 31509 ns/op 9870 B/op 119 allocs/op +BenchmarkTraffic_Param20 57633 21107 ns/op 7853 B/op 47 allocs/op +BenchmarkVulcan_Param20 1000000 1178 ns/op 98 B/op 3 allocs/op +BenchmarkAce_ParamWrite 7330743 180 ns/op 8 B/op 1 allocs/op +BenchmarkAero_ParamWrite 13833598 86.7 ns/op 0 B/op 0 allocs/op +BenchmarkBear_ParamWrite 1363321 867 ns/op 456 B/op 5 allocs/op +BenchmarkBeego_ParamWrite 1000000 1104 ns/op 360 B/op 4 allocs/op +BenchmarkBone_ParamWrite 1000000 1475 ns/op 816 B/op 6 allocs/op +BenchmarkChi_ParamWrite 1320590 892 ns/op 432 B/op 3 allocs/op +BenchmarkDenco_ParamWrite 7093605 172 ns/op 32 B/op 1 allocs/op +BenchmarkEcho_ParamWrite 8434424 161 ns/op 8 B/op 1 allocs/op +BenchmarkGin_ParamWrite 10377034 118 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_ParamWrite 1000000 1266 ns/op 656 B/op 9 allocs/op +BenchmarkGoji_ParamWrite 1874168 654 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_ParamWrite 459032 2352 ns/op 1360 B/op 13 allocs/op 
+BenchmarkGoJsonRest_ParamWrite 499434 2145 ns/op 1128 B/op 18 allocs/op +BenchmarkGoRestful_ParamWrite 241087 5470 ns/op 4200 B/op 15 allocs/op +BenchmarkGorillaMux_ParamWrite 425686 2522 ns/op 1280 B/op 10 allocs/op +BenchmarkGowwwRouter_ParamWrite 922172 1778 ns/op 976 B/op 8 allocs/op +BenchmarkHttpRouter_ParamWrite 15392049 77.7 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_ParamWrite 1973385 597 ns/op 352 B/op 3 allocs/op +BenchmarkKocha_ParamWrite 4262500 281 ns/op 56 B/op 3 allocs/op +BenchmarkLARS_ParamWrite 10764410 113 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_ParamWrite 486769 2726 ns/op 1176 B/op 14 allocs/op +BenchmarkMartini_ParamWrite 264804 4842 ns/op 1176 B/op 14 allocs/op +BenchmarkPat_ParamWrite 735116 2047 ns/op 960 B/op 15 allocs/op +BenchmarkPossum_ParamWrite 1000000 1004 ns/op 496 B/op 5 allocs/op +BenchmarkR2router_ParamWrite 1592136 768 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_ParamWrite 3582051 339 ns/op 112 B/op 2 allocs/op +BenchmarkTango_ParamWrite 2237337 534 ns/op 136 B/op 4 allocs/op +BenchmarkTigerTonic_ParamWrite 439608 3136 ns/op 1216 B/op 21 allocs/op +BenchmarkTraffic_ParamWrite 306979 4328 ns/op 2280 B/op 25 allocs/op +BenchmarkVulcan_ParamWrite 2529973 472 ns/op 98 B/op 3 allocs/op +``` + +## GitHub + +```sh +BenchmarkGin_GithubStatic 15629472 76.7 ns/op 0 B/op 0 allocs/op + +BenchmarkAce_GithubStatic 15542612 75.9 ns/op 0 B/op 0 allocs/op +BenchmarkAero_GithubStatic 24777151 48.5 ns/op 0 B/op 0 allocs/op +BenchmarkBear_GithubStatic 2788894 435 ns/op 120 B/op 3 allocs/op +BenchmarkBeego_GithubStatic 1000000 1064 ns/op 352 B/op 3 allocs/op +BenchmarkBone_GithubStatic 93507 12838 ns/op 2880 B/op 60 allocs/op +BenchmarkChi_GithubStatic 1387743 860 ns/op 432 B/op 3 allocs/op +BenchmarkDenco_GithubStatic 39384996 30.4 ns/op 0 B/op 0 allocs/op +BenchmarkEcho_GithubStatic 12076382 99.1 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_GithubStatic 1596495 756 ns/op 296 B/op 5 allocs/op +BenchmarkGoji_GithubStatic 6364876 189 ns/op 0 B/op 0 allocs/op +BenchmarkGojiv2_GithubStatic 550202 2098 ns/op 1312 B/op 10 allocs/op +BenchmarkGoRestful_GithubStatic 102183 12552 ns/op 4256 B/op 13 allocs/op +BenchmarkGoJsonRest_GithubStatic 1000000 1029 ns/op 329 B/op 11 allocs/op +BenchmarkGorillaMux_GithubStatic 255552 5190 ns/op 976 B/op 9 allocs/op +BenchmarkGowwwRouter_GithubStatic 15531916 77.1 ns/op 0 B/op 0 allocs/op +BenchmarkHttpRouter_GithubStatic 27920724 43.1 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_GithubStatic 21448953 55.8 ns/op 0 B/op 0 allocs/op +BenchmarkKocha_GithubStatic 21405310 56.0 ns/op 0 B/op 0 allocs/op +BenchmarkLARS_GithubStatic 13625156 89.0 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_GithubStatic 1000000 1747 ns/op 736 B/op 8 allocs/op +BenchmarkMartini_GithubStatic 187186 7326 ns/op 768 B/op 9 allocs/op +BenchmarkPat_GithubStatic 109143 11563 ns/op 3648 B/op 76 allocs/op +BenchmarkPossum_GithubStatic 1575898 770 ns/op 416 B/op 3 allocs/op +BenchmarkR2router_GithubStatic 3046231 404 ns/op 144 B/op 4 allocs/op +BenchmarkRivet_GithubStatic 11484826 105 ns/op 0 B/op 0 allocs/op +BenchmarkTango_GithubStatic 1000000 1153 ns/op 248 B/op 8 allocs/op +BenchmarkTigerTonic_GithubStatic 4929780 249 ns/op 48 B/op 1 allocs/op +BenchmarkTraffic_GithubStatic 106351 11819 ns/op 4664 B/op 90 allocs/op +BenchmarkVulcan_GithubStatic 1613271 722 ns/op 98 B/op 3 allocs/op +BenchmarkAce_GithubParam 8386032 143 ns/op 0 B/op 0 allocs/op +BenchmarkAero_GithubParam 11816200 102 ns/op 0 B/op 0 allocs/op +BenchmarkBear_GithubParam 1000000 1012 ns/op 496 B/op 5 
allocs/op +BenchmarkBeego_GithubParam 1000000 1157 ns/op 352 B/op 3 allocs/op +BenchmarkBone_GithubParam 184653 6912 ns/op 1888 B/op 19 allocs/op +BenchmarkChi_GithubParam 1000000 1102 ns/op 432 B/op 3 allocs/op +BenchmarkDenco_GithubParam 3484798 352 ns/op 128 B/op 1 allocs/op +BenchmarkEcho_GithubParam 6337380 189 ns/op 0 B/op 0 allocs/op +BenchmarkGin_GithubParam 9132032 131 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_GithubParam 1000000 1446 ns/op 712 B/op 9 allocs/op +BenchmarkGoji_GithubParam 1248640 977 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_GithubParam 383233 2784 ns/op 1408 B/op 13 allocs/op +BenchmarkGoJsonRest_GithubParam 1000000 1991 ns/op 713 B/op 14 allocs/op +BenchmarkGoRestful_GithubParam 76414 16015 ns/op 4352 B/op 16 allocs/op +BenchmarkGorillaMux_GithubParam 150026 7663 ns/op 1296 B/op 10 allocs/op +BenchmarkGowwwRouter_GithubParam 1592044 751 ns/op 432 B/op 3 allocs/op +BenchmarkHttpRouter_GithubParam 10420628 115 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_GithubParam 1403755 835 ns/op 384 B/op 4 allocs/op +BenchmarkKocha_GithubParam 2286170 533 ns/op 128 B/op 5 allocs/op +BenchmarkLARS_GithubParam 9540374 129 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_GithubParam 533154 2742 ns/op 1072 B/op 10 allocs/op +BenchmarkMartini_GithubParam 119397 9638 ns/op 1152 B/op 11 allocs/op +BenchmarkPat_GithubParam 150675 8858 ns/op 2408 B/op 48 allocs/op +BenchmarkPossum_GithubParam 1000000 1001 ns/op 496 B/op 5 allocs/op +BenchmarkR2router_GithubParam 1602886 761 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_GithubParam 2986579 409 ns/op 96 B/op 1 allocs/op +BenchmarkTango_GithubParam 1000000 1356 ns/op 344 B/op 8 allocs/op +BenchmarkTigerTonic_GithubParam 388899 3429 ns/op 1176 B/op 22 allocs/op +BenchmarkTraffic_GithubParam 123160 9734 ns/op 2816 B/op 40 allocs/op +BenchmarkVulcan_GithubParam 1000000 1138 ns/op 98 B/op 3 allocs/op +BenchmarkAce_GithubAll 40543 29670 ns/op 0 B/op 0 allocs/op +BenchmarkAero_GithubAll 57632 20648 ns/op 0 B/op 0 allocs/op +BenchmarkBear_GithubAll 9234 216179 ns/op 86448 B/op 943 allocs/op +BenchmarkBeego_GithubAll 7407 243496 ns/op 71456 B/op 609 allocs/op +BenchmarkBone_GithubAll 420 2922835 ns/op 720160 B/op 8620 allocs/op +BenchmarkChi_GithubAll 7620 238331 ns/op 87696 B/op 609 allocs/op +BenchmarkDenco_GithubAll 18355 64494 ns/op 20224 B/op 167 allocs/op +BenchmarkEcho_GithubAll 31251 38479 ns/op 0 B/op 0 allocs/op +BenchmarkGin_GithubAll 43550 27364 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_GithubAll 4117 300062 ns/op 131656 B/op 1686 allocs/op +BenchmarkGoji_GithubAll 3274 416158 ns/op 56112 B/op 334 allocs/op +BenchmarkGojiv2_GithubAll 1402 870518 ns/op 352720 B/op 4321 allocs/op +BenchmarkGoJsonRest_GithubAll 2976 401507 ns/op 134371 B/op 2737 allocs/op +BenchmarkGoRestful_GithubAll 410 2913158 ns/op 910144 B/op 2938 allocs/op +BenchmarkGorillaMux_GithubAll 346 3384987 ns/op 251650 B/op 1994 allocs/op +BenchmarkGowwwRouter_GithubAll 10000 143025 ns/op 72144 B/op 501 allocs/op +BenchmarkHttpRouter_GithubAll 55938 21360 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_GithubAll 10000 153944 ns/op 65856 B/op 671 allocs/op +BenchmarkKocha_GithubAll 10000 106315 ns/op 23304 B/op 843 allocs/op +BenchmarkLARS_GithubAll 47779 25084 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_GithubAll 3266 371907 ns/op 149409 B/op 1624 allocs/op +BenchmarkMartini_GithubAll 331 3444706 ns/op 226551 B/op 2325 allocs/op +BenchmarkPat_GithubAll 273 4381818 ns/op 1483152 B/op 26963 allocs/op +BenchmarkPossum_GithubAll 10000 164367 ns/op 84448 B/op 609 allocs/op 
+BenchmarkR2router_GithubAll 10000 160220 ns/op 77328 B/op 979 allocs/op +BenchmarkRivet_GithubAll 14625 82453 ns/op 16272 B/op 167 allocs/op +BenchmarkTango_GithubAll 6255 279611 ns/op 63826 B/op 1618 allocs/op +BenchmarkTigerTonic_GithubAll 2008 687874 ns/op 193856 B/op 4474 allocs/op +BenchmarkTraffic_GithubAll 355 3478508 ns/op 820744 B/op 14114 allocs/op +BenchmarkVulcan_GithubAll 6885 193333 ns/op 19894 B/op 609 allocs/op +``` + +## Google+ + +```sh +BenchmarkGin_GPlusStatic 19247326 62.2 ns/op 0 B/op 0 allocs/op + +BenchmarkAce_GPlusStatic 20235060 59.2 ns/op 0 B/op 0 allocs/op +BenchmarkAero_GPlusStatic 31978935 37.6 ns/op 0 B/op 0 allocs/op +BenchmarkBear_GPlusStatic 3516523 341 ns/op 104 B/op 3 allocs/op +BenchmarkBeego_GPlusStatic 1212036 991 ns/op 352 B/op 3 allocs/op +BenchmarkBone_GPlusStatic 6736242 183 ns/op 32 B/op 1 allocs/op +BenchmarkChi_GPlusStatic 1490640 814 ns/op 432 B/op 3 allocs/op +BenchmarkDenco_GPlusStatic 55006856 21.8 ns/op 0 B/op 0 allocs/op +BenchmarkEcho_GPlusStatic 17688258 67.9 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_GPlusStatic 1829181 666 ns/op 280 B/op 5 allocs/op +BenchmarkGoji_GPlusStatic 9147451 130 ns/op 0 B/op 0 allocs/op +BenchmarkGojiv2_GPlusStatic 594015 2063 ns/op 1312 B/op 10 allocs/op +BenchmarkGoJsonRest_GPlusStatic 1264906 950 ns/op 329 B/op 11 allocs/op +BenchmarkGoRestful_GPlusStatic 231558 5341 ns/op 3872 B/op 13 allocs/op +BenchmarkGorillaMux_GPlusStatic 908418 1809 ns/op 976 B/op 9 allocs/op +BenchmarkGowwwRouter_GPlusStatic 40684604 29.5 ns/op 0 B/op 0 allocs/op +BenchmarkHttpRouter_GPlusStatic 46742804 25.7 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_GPlusStatic 32567161 36.9 ns/op 0 B/op 0 allocs/op +BenchmarkKocha_GPlusStatic 33800060 35.3 ns/op 0 B/op 0 allocs/op +BenchmarkLARS_GPlusStatic 20431858 60.0 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_GPlusStatic 1000000 1745 ns/op 736 B/op 8 allocs/op +BenchmarkMartini_GPlusStatic 442248 3619 ns/op 768 B/op 9 allocs/op +BenchmarkPat_GPlusStatic 4328004 292 ns/op 96 B/op 2 allocs/op +BenchmarkPossum_GPlusStatic 1570753 763 ns/op 416 B/op 3 allocs/op +BenchmarkR2router_GPlusStatic 3339474 355 ns/op 144 B/op 4 allocs/op +BenchmarkRivet_GPlusStatic 18570961 64.7 ns/op 0 B/op 0 allocs/op +BenchmarkTango_GPlusStatic 1388702 860 ns/op 200 B/op 8 allocs/op +BenchmarkTigerTonic_GPlusStatic 7803543 159 ns/op 32 B/op 1 allocs/op +BenchmarkTraffic_GPlusStatic 878605 2171 ns/op 1112 B/op 16 allocs/op +BenchmarkVulcan_GPlusStatic 2742446 437 ns/op 98 B/op 3 allocs/op +BenchmarkAce_GPlusParam 11626975 105 ns/op 0 B/op 0 allocs/op +BenchmarkAero_GPlusParam 16914322 71.6 ns/op 0 B/op 0 allocs/op +BenchmarkBear_GPlusParam 1405173 832 ns/op 480 B/op 5 allocs/op +BenchmarkBeego_GPlusParam 1000000 1075 ns/op 352 B/op 3 allocs/op +BenchmarkBone_GPlusParam 1000000 1557 ns/op 816 B/op 6 allocs/op +BenchmarkChi_GPlusParam 1347926 894 ns/op 432 B/op 3 allocs/op +BenchmarkDenco_GPlusParam 5513000 212 ns/op 64 B/op 1 allocs/op +BenchmarkEcho_GPlusParam 11884383 101 ns/op 0 B/op 0 allocs/op +BenchmarkGin_GPlusParam 12898952 93.1 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_GPlusParam 1000000 1194 ns/op 648 B/op 8 allocs/op +BenchmarkGoji_GPlusParam 1857229 645 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_GPlusParam 520939 2322 ns/op 1328 B/op 11 allocs/op +BenchmarkGoJsonRest_GPlusParam 1000000 1536 ns/op 649 B/op 13 allocs/op +BenchmarkGoRestful_GPlusParam 205449 5800 ns/op 4192 B/op 14 allocs/op +BenchmarkGorillaMux_GPlusParam 395310 3188 ns/op 1280 B/op 10 allocs/op +BenchmarkGowwwRouter_GPlusParam 
1851798 667 ns/op 432 B/op 3 allocs/op +BenchmarkHttpRouter_GPlusParam 18420789 65.2 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_GPlusParam 1878463 629 ns/op 352 B/op 3 allocs/op +BenchmarkKocha_GPlusParam 4495610 273 ns/op 56 B/op 3 allocs/op +BenchmarkLARS_GPlusParam 14615976 83.2 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_GPlusParam 584145 2549 ns/op 1072 B/op 10 allocs/op +BenchmarkMartini_GPlusParam 250501 4583 ns/op 1072 B/op 10 allocs/op +BenchmarkPat_GPlusParam 1000000 1645 ns/op 576 B/op 11 allocs/op +BenchmarkPossum_GPlusParam 1000000 1008 ns/op 496 B/op 5 allocs/op +BenchmarkR2router_GPlusParam 1708191 688 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_GPlusParam 5795014 211 ns/op 48 B/op 1 allocs/op +BenchmarkTango_GPlusParam 1000000 1091 ns/op 264 B/op 8 allocs/op +BenchmarkTigerTonic_GPlusParam 760221 2489 ns/op 856 B/op 16 allocs/op +BenchmarkTraffic_GPlusParam 309774 4039 ns/op 1872 B/op 21 allocs/op +BenchmarkVulcan_GPlusParam 1935730 623 ns/op 98 B/op 3 allocs/op +BenchmarkAce_GPlus2Params 9158314 134 ns/op 0 B/op 0 allocs/op +BenchmarkAero_GPlus2Params 11300517 107 ns/op 0 B/op 0 allocs/op +BenchmarkBear_GPlus2Params 1239238 961 ns/op 496 B/op 5 allocs/op +BenchmarkBeego_GPlus2Params 1000000 1202 ns/op 352 B/op 3 allocs/op +BenchmarkBone_GPlus2Params 335576 3725 ns/op 1168 B/op 10 allocs/op +BenchmarkChi_GPlus2Params 1000000 1014 ns/op 432 B/op 3 allocs/op +BenchmarkDenco_GPlus2Params 4394598 280 ns/op 64 B/op 1 allocs/op +BenchmarkEcho_GPlus2Params 7851861 154 ns/op 0 B/op 0 allocs/op +BenchmarkGin_GPlus2Params 9958588 120 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_GPlus2Params 1000000 1433 ns/op 712 B/op 9 allocs/op +BenchmarkGoji_GPlus2Params 1325134 909 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_GPlus2Params 405955 2870 ns/op 1408 B/op 14 allocs/op +BenchmarkGoJsonRest_GPlus2Params 977038 1987 ns/op 713 B/op 14 allocs/op +BenchmarkGoRestful_GPlus2Params 205018 6142 ns/op 4384 B/op 16 allocs/op +BenchmarkGorillaMux_GPlus2Params 205641 6015 ns/op 1296 B/op 10 allocs/op +BenchmarkGowwwRouter_GPlus2Params 1748542 684 ns/op 432 B/op 3 allocs/op +BenchmarkHttpRouter_GPlus2Params 14047102 87.7 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_GPlus2Params 1418673 828 ns/op 384 B/op 4 allocs/op +BenchmarkKocha_GPlus2Params 2334562 520 ns/op 128 B/op 5 allocs/op +BenchmarkLARS_GPlus2Params 11954094 101 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_GPlus2Params 491552 2890 ns/op 1072 B/op 10 allocs/op +BenchmarkMartini_GPlus2Params 120532 9545 ns/op 1200 B/op 13 allocs/op +BenchmarkPat_GPlus2Params 194739 6766 ns/op 2168 B/op 33 allocs/op +BenchmarkPossum_GPlus2Params 1201224 1009 ns/op 496 B/op 5 allocs/op +BenchmarkR2router_GPlus2Params 1575535 756 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_GPlus2Params 3698930 325 ns/op 96 B/op 1 allocs/op +BenchmarkTango_GPlus2Params 1000000 1212 ns/op 344 B/op 8 allocs/op +BenchmarkTigerTonic_GPlus2Params 349350 3660 ns/op 1200 B/op 22 allocs/op +BenchmarkTraffic_GPlus2Params 169714 7862 ns/op 2248 B/op 28 allocs/op +BenchmarkVulcan_GPlus2Params 1222288 974 ns/op 98 B/op 3 allocs/op +BenchmarkAce_GPlusAll 845606 1398 ns/op 0 B/op 0 allocs/op +BenchmarkAero_GPlusAll 1000000 1009 ns/op 0 B/op 0 allocs/op +BenchmarkBear_GPlusAll 103830 11386 ns/op 5488 B/op 61 allocs/op +BenchmarkBeego_GPlusAll 82653 14784 ns/op 4576 B/op 39 allocs/op +BenchmarkBone_GPlusAll 36601 33123 ns/op 11744 B/op 109 allocs/op +BenchmarkChi_GPlusAll 95264 12831 ns/op 5616 B/op 39 allocs/op +BenchmarkDenco_GPlusAll 567681 2950 ns/op 672 B/op 11 allocs/op 
+BenchmarkEcho_GPlusAll 720366 1665 ns/op 0 B/op 0 allocs/op +BenchmarkGin_GPlusAll 1000000 1185 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_GPlusAll 71575 16365 ns/op 8040 B/op 103 allocs/op +BenchmarkGoji_GPlusAll 136352 9191 ns/op 3696 B/op 22 allocs/op +BenchmarkGojiv2_GPlusAll 38006 31802 ns/op 17616 B/op 154 allocs/op +BenchmarkGoJsonRest_GPlusAll 57238 21561 ns/op 8117 B/op 170 allocs/op +BenchmarkGoRestful_GPlusAll 15147 79276 ns/op 55520 B/op 192 allocs/op +BenchmarkGorillaMux_GPlusAll 24446 48410 ns/op 16112 B/op 128 allocs/op +BenchmarkGowwwRouter_GPlusAll 150112 7770 ns/op 4752 B/op 33 allocs/op +BenchmarkHttpRouter_GPlusAll 1367820 878 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_GPlusAll 166628 8004 ns/op 4032 B/op 38 allocs/op +BenchmarkKocha_GPlusAll 265694 4570 ns/op 976 B/op 43 allocs/op +BenchmarkLARS_GPlusAll 1000000 1068 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_GPlusAll 54564 23305 ns/op 9568 B/op 104 allocs/op +BenchmarkMartini_GPlusAll 16274 73845 ns/op 14016 B/op 145 allocs/op +BenchmarkPat_GPlusAll 27181 44478 ns/op 15264 B/op 271 allocs/op +BenchmarkPossum_GPlusAll 122587 10277 ns/op 5408 B/op 39 allocs/op +BenchmarkR2router_GPlusAll 130137 9297 ns/op 5040 B/op 63 allocs/op +BenchmarkRivet_GPlusAll 532438 3323 ns/op 768 B/op 11 allocs/op +BenchmarkTango_GPlusAll 86054 14531 ns/op 3656 B/op 104 allocs/op +BenchmarkTigerTonic_GPlusAll 33936 35356 ns/op 11600 B/op 242 allocs/op +BenchmarkTraffic_GPlusAll 17833 68181 ns/op 26248 B/op 341 allocs/op +BenchmarkVulcan_GPlusAll 120109 9861 ns/op 1274 B/op 39 allocs/op +``` + +## Parse.com + +```sh +BenchmarkGin_ParseStatic 18877833 63.5 ns/op 0 B/op 0 allocs/op + +BenchmarkAce_ParseStatic 19663731 60.8 ns/op 0 B/op 0 allocs/op +BenchmarkAero_ParseStatic 28967341 41.5 ns/op 0 B/op 0 allocs/op +BenchmarkBear_ParseStatic 3006984 402 ns/op 120 B/op 3 allocs/op +BenchmarkBeego_ParseStatic 1000000 1031 ns/op 352 B/op 3 allocs/op +BenchmarkBone_ParseStatic 1782482 675 ns/op 144 B/op 3 allocs/op +BenchmarkChi_ParseStatic 1453261 819 ns/op 432 B/op 3 allocs/op +BenchmarkDenco_ParseStatic 45023595 26.5 ns/op 0 B/op 0 allocs/op +BenchmarkEcho_ParseStatic 17330470 69.3 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_ParseStatic 1644006 731 ns/op 296 B/op 5 allocs/op +BenchmarkGoji_ParseStatic 7026930 170 ns/op 0 B/op 0 allocs/op +BenchmarkGojiv2_ParseStatic 517618 2037 ns/op 1312 B/op 10 allocs/op +BenchmarkGoJsonRest_ParseStatic 1227080 975 ns/op 329 B/op 11 allocs/op +BenchmarkGoRestful_ParseStatic 192458 6659 ns/op 4256 B/op 13 allocs/op +BenchmarkGorillaMux_ParseStatic 744062 2109 ns/op 976 B/op 9 allocs/op +BenchmarkGowwwRouter_ParseStatic 37781062 31.8 ns/op 0 B/op 0 allocs/op +BenchmarkHttpRouter_ParseStatic 45311223 26.5 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_ParseStatic 21383475 56.1 ns/op 0 B/op 0 allocs/op +BenchmarkKocha_ParseStatic 29953290 40.1 ns/op 0 B/op 0 allocs/op +BenchmarkLARS_ParseStatic 20036196 62.7 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_ParseStatic 1000000 1740 ns/op 736 B/op 8 allocs/op +BenchmarkMartini_ParseStatic 404156 3801 ns/op 768 B/op 9 allocs/op +BenchmarkPat_ParseStatic 1547180 772 ns/op 240 B/op 5 allocs/op +BenchmarkPossum_ParseStatic 1608991 757 ns/op 416 B/op 3 allocs/op +BenchmarkR2router_ParseStatic 3177936 385 ns/op 144 B/op 4 allocs/op +BenchmarkRivet_ParseStatic 17783205 67.4 ns/op 0 B/op 0 allocs/op +BenchmarkTango_ParseStatic 1210777 990 ns/op 248 B/op 8 allocs/op +BenchmarkTigerTonic_ParseStatic 5316440 231 ns/op 48 B/op 1 allocs/op +BenchmarkTraffic_ParseStatic 496050 
2539 ns/op 1256 B/op 19 allocs/op +BenchmarkVulcan_ParseStatic 2462798 488 ns/op 98 B/op 3 allocs/op +BenchmarkAce_ParseParam 13393669 89.6 ns/op 0 B/op 0 allocs/op +BenchmarkAero_ParseParam 19836619 60.4 ns/op 0 B/op 0 allocs/op +BenchmarkBear_ParseParam 1405954 864 ns/op 467 B/op 5 allocs/op +BenchmarkBeego_ParseParam 1000000 1065 ns/op 352 B/op 3 allocs/op +BenchmarkBone_ParseParam 1000000 1698 ns/op 896 B/op 7 allocs/op +BenchmarkChi_ParseParam 1356037 873 ns/op 432 B/op 3 allocs/op +BenchmarkDenco_ParseParam 6241392 204 ns/op 64 B/op 1 allocs/op +BenchmarkEcho_ParseParam 14088100 85.1 ns/op 0 B/op 0 allocs/op +BenchmarkGin_ParseParam 17426064 68.9 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_ParseParam 1000000 1254 ns/op 664 B/op 8 allocs/op +BenchmarkGoji_ParseParam 1682574 713 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_ParseParam 502224 2333 ns/op 1360 B/op 12 allocs/op +BenchmarkGoJsonRest_ParseParam 1000000 1401 ns/op 649 B/op 13 allocs/op +BenchmarkGoRestful_ParseParam 182623 7097 ns/op 4576 B/op 14 allocs/op +BenchmarkGorillaMux_ParseParam 482332 2477 ns/op 1280 B/op 10 allocs/op +BenchmarkGowwwRouter_ParseParam 1834873 657 ns/op 432 B/op 3 allocs/op +BenchmarkHttpRouter_ParseParam 23593393 51.0 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_ParseParam 2100160 574 ns/op 352 B/op 3 allocs/op +BenchmarkKocha_ParseParam 4837220 252 ns/op 56 B/op 3 allocs/op +BenchmarkLARS_ParseParam 18411192 66.2 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_ParseParam 571870 2398 ns/op 1072 B/op 10 allocs/op +BenchmarkMartini_ParseParam 286262 4268 ns/op 1072 B/op 10 allocs/op +BenchmarkPat_ParseParam 692906 2157 ns/op 992 B/op 15 allocs/op +BenchmarkPossum_ParseParam 1000000 1011 ns/op 496 B/op 5 allocs/op +BenchmarkR2router_ParseParam 1722735 697 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_ParseParam 6058054 203 ns/op 48 B/op 1 allocs/op +BenchmarkTango_ParseParam 1000000 1061 ns/op 280 B/op 8 allocs/op +BenchmarkTigerTonic_ParseParam 890275 2277 ns/op 784 B/op 15 allocs/op +BenchmarkTraffic_ParseParam 351322 3543 ns/op 1896 B/op 21 allocs/op +BenchmarkVulcan_ParseParam 2076544 572 ns/op 98 B/op 3 allocs/op +BenchmarkAce_Parse2Params 11718074 101 ns/op 0 B/op 0 allocs/op +BenchmarkAero_Parse2Params 16264988 73.4 ns/op 0 B/op 0 allocs/op +BenchmarkBear_Parse2Params 1238322 973 ns/op 496 B/op 5 allocs/op +BenchmarkBeego_Parse2Params 1000000 1120 ns/op 352 B/op 3 allocs/op +BenchmarkBone_Parse2Params 1000000 1632 ns/op 848 B/op 6 allocs/op +BenchmarkChi_Parse2Params 1239477 955 ns/op 432 B/op 3 allocs/op +BenchmarkDenco_Parse2Params 4944133 245 ns/op 64 B/op 1 allocs/op +BenchmarkEcho_Parse2Params 10518286 114 ns/op 0 B/op 0 allocs/op +BenchmarkGin_Parse2Params 14505195 82.7 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_Parse2Params 1000000 1437 ns/op 712 B/op 9 allocs/op +BenchmarkGoji_Parse2Params 1689883 707 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_Parse2Params 502334 2308 ns/op 1344 B/op 11 allocs/op +BenchmarkGoJsonRest_Parse2Params 1000000 1771 ns/op 713 B/op 14 allocs/op +BenchmarkGoRestful_Parse2Params 159092 7583 ns/op 4928 B/op 14 allocs/op +BenchmarkGorillaMux_Parse2Params 417548 2980 ns/op 1296 B/op 10 allocs/op +BenchmarkGowwwRouter_Parse2Params 1751737 686 ns/op 432 B/op 3 allocs/op +BenchmarkHttpRouter_Parse2Params 18089204 66.3 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_Parse2Params 1556986 777 ns/op 384 B/op 4 allocs/op +BenchmarkKocha_Parse2Params 2493082 485 ns/op 128 B/op 5 allocs/op +BenchmarkLARS_Parse2Params 15350108 78.5 ns/op 0 B/op 0 allocs/op 
+BenchmarkMacaron_Parse2Params 530974 2605 ns/op 1072 B/op 10 allocs/op +BenchmarkMartini_Parse2Params 247069 4673 ns/op 1152 B/op 11 allocs/op +BenchmarkPat_Parse2Params 816295 2126 ns/op 752 B/op 16 allocs/op +BenchmarkPossum_Parse2Params 1000000 1002 ns/op 496 B/op 5 allocs/op +BenchmarkR2router_Parse2Params 1569771 733 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_Parse2Params 4080546 295 ns/op 96 B/op 1 allocs/op +BenchmarkTango_Parse2Params 1000000 1121 ns/op 312 B/op 8 allocs/op +BenchmarkTigerTonic_Parse2Params 399556 3470 ns/op 1168 B/op 22 allocs/op +BenchmarkTraffic_Parse2Params 314194 4159 ns/op 1944 B/op 22 allocs/op +BenchmarkVulcan_Parse2Params 1827559 664 ns/op 98 B/op 3 allocs/op +BenchmarkAce_ParseAll 478395 2503 ns/op 0 B/op 0 allocs/op +BenchmarkAero_ParseAll 715392 1658 ns/op 0 B/op 0 allocs/op +BenchmarkBear_ParseAll 59191 20124 ns/op 8928 B/op 110 allocs/op +BenchmarkBeego_ParseAll 45507 27266 ns/op 9152 B/op 78 allocs/op +BenchmarkBone_ParseAll 29328 41459 ns/op 16208 B/op 147 allocs/op +BenchmarkChi_ParseAll 48531 25053 ns/op 11232 B/op 78 allocs/op +BenchmarkDenco_ParseAll 325532 4284 ns/op 928 B/op 16 allocs/op +BenchmarkEcho_ParseAll 433771 2759 ns/op 0 B/op 0 allocs/op +BenchmarkGin_ParseAll 576316 2082 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_ParseAll 41500 29692 ns/op 13728 B/op 181 allocs/op +BenchmarkGoji_ParseAll 80833 15563 ns/op 5376 B/op 32 allocs/op +BenchmarkGojiv2_ParseAll 19836 60335 ns/op 34448 B/op 277 allocs/op +BenchmarkGoJsonRest_ParseAll 32210 38027 ns/op 13866 B/op 321 allocs/op +BenchmarkGoRestful_ParseAll 6644 190842 ns/op 117600 B/op 354 allocs/op +BenchmarkGorillaMux_ParseAll 12634 95894 ns/op 30288 B/op 250 allocs/op +BenchmarkGowwwRouter_ParseAll 98152 12159 ns/op 6912 B/op 48 allocs/op +BenchmarkHttpRouter_ParseAll 933208 1273 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_ParseAll 107191 11554 ns/op 5728 B/op 51 allocs/op +BenchmarkKocha_ParseAll 184862 6225 ns/op 1112 B/op 54 allocs/op +BenchmarkLARS_ParseAll 644546 1858 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_ParseAll 26145 46484 ns/op 19136 B/op 208 allocs/op +BenchmarkMartini_ParseAll 10000 121838 ns/op 25072 B/op 253 allocs/op +BenchmarkPat_ParseAll 25417 47196 ns/op 15216 B/op 308 allocs/op +BenchmarkPossum_ParseAll 58550 20735 ns/op 10816 B/op 78 allocs/op +BenchmarkR2router_ParseAll 72732 16584 ns/op 8352 B/op 120 allocs/op +BenchmarkRivet_ParseAll 281365 4968 ns/op 912 B/op 16 allocs/op +BenchmarkTango_ParseAll 42831 28668 ns/op 7168 B/op 208 allocs/op +BenchmarkTigerTonic_ParseAll 23774 49972 ns/op 16048 B/op 332 allocs/op +BenchmarkTraffic_ParseAll 10000 104679 ns/op 45520 B/op 605 allocs/op +BenchmarkVulcan_ParseAll 64810 18108 ns/op 2548 B/op 78 allocs/op ``` diff --git a/vendor/github.com/gin-gonic/gin/CHANGELOG.md b/vendor/github.com/gin-gonic/gin/CHANGELOG.md index ee485ec3b..a28edc840 100644 --- a/vendor/github.com/gin-gonic/gin/CHANGELOG.md +++ b/vendor/github.com/gin-gonic/gin/CHANGELOG.md @@ -1,6 +1,233 @@ -# CHANGELOG +# Gin ChangeLog -### Gin 1.2 +## Gin v1.7.2 + +### BUGFIXES + +* Fix conflict between param and exact path [#2706](https://github.com/gin-gonic/gin/issues/2706). Close issue [#2682](https://github.com/gin-gonic/gin/issues/2682) [#2696](https://github.com/gin-gonic/gin/issues/2696). 
+ +## Gin v1.7.1 + +### BUGFIXES + +* fix: data race with trustedCIDRs from [#2674](https://github.com/gin-gonic/gin/issues/2674)([#2675](https://github.com/gin-gonic/gin/pull/2675)) + +## Gin v1.7.0 + +### BUGFIXES + +* fix compile error from [#2572](https://github.com/gin-gonic/gin/pull/2572) ([#2600](https://github.com/gin-gonic/gin/pull/2600)) +* fix: print headers without Authorization header on broken pipe ([#2528](https://github.com/gin-gonic/gin/pull/2528)) +* fix(tree): reassign fullpath when register new node ([#2366](https://github.com/gin-gonic/gin/pull/2366)) + +### ENHANCEMENTS + +* Support params and exact routes without creating conflicts ([#2663](https://github.com/gin-gonic/gin/pull/2663)) +* chore: improve render string performance ([#2365](https://github.com/gin-gonic/gin/pull/2365)) +* Sync route tree to httprouter latest code ([#2368](https://github.com/gin-gonic/gin/pull/2368)) +* chore: rename getQueryCache/getFormCache to initQueryCache/initFormCa ([#2375](https://github.com/gin-gonic/gin/pull/2375)) +* chore(performance): improve countParams ([#2378](https://github.com/gin-gonic/gin/pull/2378)) +* Remove some functions that have the same effect as the bytes package ([#2387](https://github.com/gin-gonic/gin/pull/2387)) +* update:SetMode function ([#2321](https://github.com/gin-gonic/gin/pull/2321)) +* remove a unused type SecureJSONPrefix ([#2391](https://github.com/gin-gonic/gin/pull/2391)) +* Add a redirect sample for POST method ([#2389](https://github.com/gin-gonic/gin/pull/2389)) +* Add CustomRecovery builtin middleware ([#2322](https://github.com/gin-gonic/gin/pull/2322)) +* binding: avoid 2038 problem on 32-bit architectures ([#2450](https://github.com/gin-gonic/gin/pull/2450)) +* Prevent panic in Context.GetQuery() when there is no Request ([#2412](https://github.com/gin-gonic/gin/pull/2412)) +* Add GetUint and GetUint64 method on gin.context ([#2487](https://github.com/gin-gonic/gin/pull/2487)) +* update content-disposition header to MIME-style ([#2512](https://github.com/gin-gonic/gin/pull/2512)) +* reduce allocs and improve the render `WriteString` ([#2508](https://github.com/gin-gonic/gin/pull/2508)) +* implement ".Unwrap() error" on Error type ([#2525](https://github.com/gin-gonic/gin/pull/2525)) ([#2526](https://github.com/gin-gonic/gin/pull/2526)) +* Allow bind with a map[string]string ([#2484](https://github.com/gin-gonic/gin/pull/2484)) +* chore: update tree ([#2371](https://github.com/gin-gonic/gin/pull/2371)) +* Support binding for slice/array obj [Rewrite] ([#2302](https://github.com/gin-gonic/gin/pull/2302)) +* basic auth: fix timing oracle ([#2609](https://github.com/gin-gonic/gin/pull/2609)) +* Add mixed param and non-param paths (port of httprouter[#329](https://github.com/gin-gonic/gin/pull/329)) ([#2663](https://github.com/gin-gonic/gin/pull/2663)) +* feat(engine): add trustedproxies and remoteIP ([#2632](https://github.com/gin-gonic/gin/pull/2632)) + +## Gin v1.6.3 + +### ENHANCEMENTS + + * Improve performance: Change `*sync.RWMutex` to `sync.RWMutex` in context. [#2351](https://github.com/gin-gonic/gin/pull/2351) + +## Gin v1.6.2 + +### BUGFIXES + * fix missing initial sync.RWMutex [#2305](https://github.com/gin-gonic/gin/pull/2305) +### ENHANCEMENTS + * Add set samesite in cookie. 
[#2306](https://github.com/gin-gonic/gin/pull/2306) + +## Gin v1.6.1 + +### BUGFIXES + * Revert "fix accept incoming network connections" [#2294](https://github.com/gin-gonic/gin/pull/2294) + +## Gin v1.6.0 + +### BREAKING + * chore(performance): Improve performance for adding RemoveExtraSlash flag [#2159](https://github.com/gin-gonic/gin/pull/2159) + * drop support govendor [#2148](https://github.com/gin-gonic/gin/pull/2148) + * Added support for SameSite cookie flag [#1615](https://github.com/gin-gonic/gin/pull/1615) +### FEATURES + * add yaml negotiation [#2220](https://github.com/gin-gonic/gin/pull/2220) + * FileFromFS [#2112](https://github.com/gin-gonic/gin/pull/2112) +### BUGFIXES + * Unix Socket Handling [#2280](https://github.com/gin-gonic/gin/pull/2280) + * Use json marshall in context json to fix breaking new line issue. Fixes #2209 [#2228](https://github.com/gin-gonic/gin/pull/2228) + * fix accept incoming network connections [#2216](https://github.com/gin-gonic/gin/pull/2216) + * Fixed a bug in the calculation of the maximum number of parameters [#2166](https://github.com/gin-gonic/gin/pull/2166) + * [FIX] allow empty headers on DataFromReader [#2121](https://github.com/gin-gonic/gin/pull/2121) + * Add mutex for protect Context.Keys map [#1391](https://github.com/gin-gonic/gin/pull/1391) +### ENHANCEMENTS + * Add mitigation for log injection [#2277](https://github.com/gin-gonic/gin/pull/2277) + * tree: range over nodes values [#2229](https://github.com/gin-gonic/gin/pull/2229) + * tree: remove duplicate assignment [#2222](https://github.com/gin-gonic/gin/pull/2222) + * chore: upgrade go-isatty and json-iterator/go [#2215](https://github.com/gin-gonic/gin/pull/2215) + * path: sync code with httprouter [#2212](https://github.com/gin-gonic/gin/pull/2212) + * Use zero-copy approach to convert types between string and byte slice [#2206](https://github.com/gin-gonic/gin/pull/2206) + * Reuse bytes when cleaning the URL paths [#2179](https://github.com/gin-gonic/gin/pull/2179) + * tree: remove one else statement [#2177](https://github.com/gin-gonic/gin/pull/2177) + * tree: sync httprouter update (#2173) (#2172) [#2171](https://github.com/gin-gonic/gin/pull/2171) + * tree: sync part httprouter codes and reduce if/else [#2163](https://github.com/gin-gonic/gin/pull/2163) + * use http method constant [#2155](https://github.com/gin-gonic/gin/pull/2155) + * upgrade go-validator to v10 [#2149](https://github.com/gin-gonic/gin/pull/2149) + * Refactor redirect request in gin.go [#1970](https://github.com/gin-gonic/gin/pull/1970) + * Add build tag nomsgpack [#1852](https://github.com/gin-gonic/gin/pull/1852) +### DOCS + * docs(path): improve comments [#2223](https://github.com/gin-gonic/gin/pull/2223) + * Renew README to fit the modification of SetCookie method [#2217](https://github.com/gin-gonic/gin/pull/2217) + * Fix spelling [#2202](https://github.com/gin-gonic/gin/pull/2202) + * Remove broken link from README. 
[#2198](https://github.com/gin-gonic/gin/pull/2198) + * Update docs on Context.Done(), Context.Deadline() and Context.Err() [#2196](https://github.com/gin-gonic/gin/pull/2196) + * Update validator to v10 [#2190](https://github.com/gin-gonic/gin/pull/2190) + * upgrade go-validator to v10 for README [#2189](https://github.com/gin-gonic/gin/pull/2189) + * Update to currently output [#2188](https://github.com/gin-gonic/gin/pull/2188) + * Fix "Custom Validators" example [#2186](https://github.com/gin-gonic/gin/pull/2186) + * Add project to README [#2165](https://github.com/gin-gonic/gin/pull/2165) + * docs(benchmarks): for gin v1.5 [#2153](https://github.com/gin-gonic/gin/pull/2153) + * Changed wording for clarity in README.md [#2122](https://github.com/gin-gonic/gin/pull/2122) +### MISC + * ci support go1.14 [#2262](https://github.com/gin-gonic/gin/pull/2262) + * chore: upgrade depend version [#2231](https://github.com/gin-gonic/gin/pull/2231) + * Drop support go1.10 [#2147](https://github.com/gin-gonic/gin/pull/2147) + * fix comment in `mode.go` [#2129](https://github.com/gin-gonic/gin/pull/2129) + +## Gin v1.5.0 + +- [FIX] Use DefaultWriter and DefaultErrorWriter for debug messages [#1891](https://github.com/gin-gonic/gin/pull/1891) +- [NEW] Now you can parse the inline lowercase start structure [#1893](https://github.com/gin-gonic/gin/pull/1893) +- [FIX] Some code improvements [#1909](https://github.com/gin-gonic/gin/pull/1909) +- [FIX] Use encode replace json marshal increase json encoder speed [#1546](https://github.com/gin-gonic/gin/pull/1546) +- [NEW] Hold matched route full path in the Context [#1826](https://github.com/gin-gonic/gin/pull/1826) +- [FIX] Fix context.Params race condition on Copy() [#1841](https://github.com/gin-gonic/gin/pull/1841) +- [NEW] Add context param query cache [#1450](https://github.com/gin-gonic/gin/pull/1450) +- [FIX] Improve GetQueryMap performance [#1918](https://github.com/gin-gonic/gin/pull/1918) +- [FIX] Improve get post data [#1920](https://github.com/gin-gonic/gin/pull/1920) +- [FIX] Use context instead of x/net/context [#1922](https://github.com/gin-gonic/gin/pull/1922) +- [FIX] Attempt to fix PostForm cache bug [#1931](https://github.com/gin-gonic/gin/pull/1931) +- [NEW] Add support of multipart multi files [#1949](https://github.com/gin-gonic/gin/pull/1949) +- [NEW] Support bind http header param [#1957](https://github.com/gin-gonic/gin/pull/1957) +- [FIX] Drop support for go1.8 and go1.9 [#1933](https://github.com/gin-gonic/gin/pull/1933) +- [FIX] Bugfix for the FullPath feature [#1919](https://github.com/gin-gonic/gin/pull/1919) +- [FIX] Gin1.5 bytes.Buffer to strings.Builder [#1939](https://github.com/gin-gonic/gin/pull/1939) +- [FIX] Upgrade github.com/ugorji/go/codec [#1969](https://github.com/gin-gonic/gin/pull/1969) +- [NEW] Support bind unix time [#1980](https://github.com/gin-gonic/gin/pull/1980) +- [FIX] Simplify code [#2004](https://github.com/gin-gonic/gin/pull/2004) +- [NEW] Support negative Content-Length in DataFromReader [#1981](https://github.com/gin-gonic/gin/pull/1981) +- [FIX] Identify terminal on a RISC-V architecture for auto-colored logs [#2019](https://github.com/gin-gonic/gin/pull/2019) +- [BREAKING] `Context.JSONP()` now expects a semicolon (`;`) at the end [#2007](https://github.com/gin-gonic/gin/pull/2007) +- [BREAKING] Upgrade default `binding.Validator` to v9 (see [its changelog](https://github.com/go-playground/validator/releases/tag/v9.0.0)) [#1015](https://github.com/gin-gonic/gin/pull/1015) +- [NEW] Add 
`DisallowUnknownFields()` in `Context.BindJSON()` [#2028](https://github.com/gin-gonic/gin/pull/2028) +- [NEW] Use specific `net.Listener` with `Engine.RunListener()` [#2023](https://github.com/gin-gonic/gin/pull/2023) +- [FIX] Fix some typo [#2079](https://github.com/gin-gonic/gin/pull/2079) [#2080](https://github.com/gin-gonic/gin/pull/2080) +- [FIX] Relocate binding body tests [#2086](https://github.com/gin-gonic/gin/pull/2086) +- [FIX] Use Writer in Context.Status [#1606](https://github.com/gin-gonic/gin/pull/1606) +- [FIX] `Engine.RunUnix()` now returns the error if it can't change the file mode [#2093](https://github.com/gin-gonic/gin/pull/2093) +- [FIX] `RouterGroup.StaticFS()` leaked files. Now it closes them. [#2118](https://github.com/gin-gonic/gin/pull/2118) +- [FIX] `Context.Request.FormFile` leaked file. Now it closes it. [#2114](https://github.com/gin-gonic/gin/pull/2114) +- [FIX] Ignore walking on `form:"-"` mapping [#1943](https://github.com/gin-gonic/gin/pull/1943) + +### Gin v1.4.0 + +- [NEW] Support for [Go Modules](https://github.com/golang/go/wiki/Modules) [#1569](https://github.com/gin-gonic/gin/pull/1569) +- [NEW] Refactor of form mapping multipart request [#1829](https://github.com/gin-gonic/gin/pull/1829) +- [FIX] Truncate Latency precision in long running request [#1830](https://github.com/gin-gonic/gin/pull/1830) +- [FIX] IsTerm flag should not be affected by DisableConsoleColor method. [#1802](https://github.com/gin-gonic/gin/pull/1802) +- [NEW] Supporting file binding [#1264](https://github.com/gin-gonic/gin/pull/1264) +- [NEW] Add support for mapping arrays [#1797](https://github.com/gin-gonic/gin/pull/1797) +- [FIX] Readme updates [#1793](https://github.com/gin-gonic/gin/pull/1793) [#1788](https://github.com/gin-gonic/gin/pull/1788) [1789](https://github.com/gin-gonic/gin/pull/1789) +- [FIX] StaticFS: Fixed Logging two log lines on 404. [#1805](https://github.com/gin-gonic/gin/pull/1805), [#1804](https://github.com/gin-gonic/gin/pull/1804) +- [NEW] Make context.Keys available as LogFormatterParams [#1779](https://github.com/gin-gonic/gin/pull/1779) +- [NEW] Use internal/json for Marshal/Unmarshal [#1791](https://github.com/gin-gonic/gin/pull/1791) +- [NEW] Support mapping time.Duration [#1794](https://github.com/gin-gonic/gin/pull/1794) +- [NEW] Refactor form mappings [#1749](https://github.com/gin-gonic/gin/pull/1749) +- [NEW] Added flag to context.Stream indicates if client disconnected in middle of stream [#1252](https://github.com/gin-gonic/gin/pull/1252) +- [FIX] Moved [examples](https://github.com/gin-gonic/examples) to stand alone Repo [#1775](https://github.com/gin-gonic/gin/pull/1775) +- [NEW] Extend context.File to allow for the content-disposition attachments via a new method context.Attachment [#1260](https://github.com/gin-gonic/gin/pull/1260) +- [FIX] Support HTTP content negotiation wildcards [#1112](https://github.com/gin-gonic/gin/pull/1112) +- [NEW] Add prefix from X-Forwarded-Prefix in redirectTrailingSlash [#1238](https://github.com/gin-gonic/gin/pull/1238) +- [FIX] context.Copy() race condition [#1020](https://github.com/gin-gonic/gin/pull/1020) +- [NEW] Add context.HandlerNames() [#1729](https://github.com/gin-gonic/gin/pull/1729) +- [FIX] Change color methods to public in the defaultLogger. 
[#1771](https://github.com/gin-gonic/gin/pull/1771) +- [FIX] Update writeHeaders method to use http.Header.Set [#1722](https://github.com/gin-gonic/gin/pull/1722) +- [NEW] Add response size to LogFormatterParams [#1752](https://github.com/gin-gonic/gin/pull/1752) +- [NEW] Allow ignoring field on form mapping [#1733](https://github.com/gin-gonic/gin/pull/1733) +- [NEW] Add a function to force color in console output. [#1724](https://github.com/gin-gonic/gin/pull/1724) +- [FIX] Context.Next() - recheck len of handlers on every iteration. [#1745](https://github.com/gin-gonic/gin/pull/1745) +- [FIX] Fix all errcheck warnings [#1739](https://github.com/gin-gonic/gin/pull/1739) [#1653](https://github.com/gin-gonic/gin/pull/1653) +- [NEW] context: inherits context cancellation and deadline from http.Request context for Go>=1.7 [#1690](https://github.com/gin-gonic/gin/pull/1690) +- [NEW] Binding for URL Params [#1694](https://github.com/gin-gonic/gin/pull/1694) +- [NEW] Add LoggerWithFormatter method [#1677](https://github.com/gin-gonic/gin/pull/1677) +- [FIX] CI testing updates [#1671](https://github.com/gin-gonic/gin/pull/1671) [#1670](https://github.com/gin-gonic/gin/pull/1670) [#1682](https://github.com/gin-gonic/gin/pull/1682) [#1669](https://github.com/gin-gonic/gin/pull/1669) +- [FIX] StaticFS(): Send 404 when path does not exist [#1663](https://github.com/gin-gonic/gin/pull/1663) +- [FIX] Handle nil body for JSON binding [#1638](https://github.com/gin-gonic/gin/pull/1638) +- [FIX] Support bind uri param [#1612](https://github.com/gin-gonic/gin/pull/1612) +- [FIX] recovery: fix issue with syscall import on google app engine [#1640](https://github.com/gin-gonic/gin/pull/1640) +- [FIX] Make sure the debug log contains line breaks [#1650](https://github.com/gin-gonic/gin/pull/1650) +- [FIX] Panic stack trace being printed during recovery of broken pipe [#1089](https://github.com/gin-gonic/gin/pull/1089) [#1259](https://github.com/gin-gonic/gin/pull/1259) +- [NEW] RunFd method to run http.Server through a file descriptor [#1609](https://github.com/gin-gonic/gin/pull/1609) +- [NEW] Yaml binding support [#1618](https://github.com/gin-gonic/gin/pull/1618) +- [FIX] Pass MaxMultipartMemory when FormFile is called [#1600](https://github.com/gin-gonic/gin/pull/1600) +- [FIX] LoadHTML* tests [#1559](https://github.com/gin-gonic/gin/pull/1559) +- [FIX] Removed use of sync.pool from HandleContext [#1565](https://github.com/gin-gonic/gin/pull/1565) +- [FIX] Format output log to os.Stderr [#1571](https://github.com/gin-gonic/gin/pull/1571) +- [FIX] Make logger use a yellow background and a darkgray text for legibility [#1570](https://github.com/gin-gonic/gin/pull/1570) +- [FIX] Remove sensitive request information from panic log. 
[#1370](https://github.com/gin-gonic/gin/pull/1370) +- [FIX] log.Println() does not print timestamp [#829](https://github.com/gin-gonic/gin/pull/829) [#1560](https://github.com/gin-gonic/gin/pull/1560) +- [NEW] Add PureJSON renderer [#694](https://github.com/gin-gonic/gin/pull/694) +- [FIX] Add missing copyright and update if/else [#1497](https://github.com/gin-gonic/gin/pull/1497) +- [FIX] Update msgpack usage [#1498](https://github.com/gin-gonic/gin/pull/1498) +- [FIX] Use protobuf on render [#1496](https://github.com/gin-gonic/gin/pull/1496) +- [FIX] Add support for Protobuf format response [#1479](https://github.com/gin-gonic/gin/pull/1479) +- [NEW] Set default time format in form binding [#1487](https://github.com/gin-gonic/gin/pull/1487) +- [FIX] Add BindXML and ShouldBindXML [#1485](https://github.com/gin-gonic/gin/pull/1485) +- [NEW] Upgrade dependency libraries [#1491](https://github.com/gin-gonic/gin/pull/1491) + + +## Gin v1.3.0 + +- [NEW] Add [`func (*Context) QueryMap`](https://godoc.org/github.com/gin-gonic/gin#Context.QueryMap), [`func (*Context) GetQueryMap`](https://godoc.org/github.com/gin-gonic/gin#Context.GetQueryMap), [`func (*Context) PostFormMap`](https://godoc.org/github.com/gin-gonic/gin#Context.PostFormMap) and [`func (*Context) GetPostFormMap`](https://godoc.org/github.com/gin-gonic/gin#Context.GetPostFormMap) to support `type map[string]string` as query string or form parameters, see [#1383](https://github.com/gin-gonic/gin/pull/1383) +- [NEW] Add [`func (*Context) AsciiJSON`](https://godoc.org/github.com/gin-gonic/gin#Context.AsciiJSON), see [#1358](https://github.com/gin-gonic/gin/pull/1358) +- [NEW] Add `Pusher()` in [`type ResponseWriter`](https://godoc.org/github.com/gin-gonic/gin#ResponseWriter) for supporting http2 push, see [#1273](https://github.com/gin-gonic/gin/pull/1273) +- [NEW] Add [`func (*Context) DataFromReader`](https://godoc.org/github.com/gin-gonic/gin#Context.DataFromReader) for serving dynamic data, see [#1304](https://github.com/gin-gonic/gin/pull/1304) +- [NEW] Add [`func (*Context) ShouldBindBodyWith`](https://godoc.org/github.com/gin-gonic/gin#Context.ShouldBindBodyWith) allowing to call binding multiple times, see [#1341](https://github.com/gin-gonic/gin/pull/1341) +- [NEW] Support pointers in form binding, see [#1336](https://github.com/gin-gonic/gin/pull/1336) +- [NEW] Add [`func (*Context) JSONP`](https://godoc.org/github.com/gin-gonic/gin#Context.JSONP), see [#1333](https://github.com/gin-gonic/gin/pull/1333) +- [NEW] Support default value in form binding, see [#1138](https://github.com/gin-gonic/gin/pull/1138) +- [NEW] Expose validator engine in [`type StructValidator`](https://godoc.org/github.com/gin-gonic/gin/binding#StructValidator), see [#1277](https://github.com/gin-gonic/gin/pull/1277) +- [NEW] Add [`func (*Context) ShouldBind`](https://godoc.org/github.com/gin-gonic/gin#Context.ShouldBind), [`func (*Context) ShouldBindQuery`](https://godoc.org/github.com/gin-gonic/gin#Context.ShouldBindQuery) and [`func (*Context) ShouldBindJSON`](https://godoc.org/github.com/gin-gonic/gin#Context.ShouldBindJSON), see [#1047](https://github.com/gin-gonic/gin/pull/1047) +- [NEW] Add support for `time.Time` location in form binding, see [#1117](https://github.com/gin-gonic/gin/pull/1117) +- [NEW] Add [`func (*Context) BindQuery`](https://godoc.org/github.com/gin-gonic/gin#Context.BindQuery), see [#1029](https://github.com/gin-gonic/gin/pull/1029) +- [NEW] Make [jsonite](https://github.com/json-iterator/go) optional with build tags, see 
[#1026](https://github.com/gin-gonic/gin/pull/1026) +- [NEW] Show query string in logger, see [#999](https://github.com/gin-gonic/gin/pull/999) +- [NEW] Add [`func (*Context) SecureJSON`](https://godoc.org/github.com/gin-gonic/gin#Context.SecureJSON), see [#987](https://github.com/gin-gonic/gin/pull/987) and [#993](https://github.com/gin-gonic/gin/pull/993) +- [DEPRECATE] `func (*Context) GetCookie` for [`func (*Context) Cookie`](https://godoc.org/github.com/gin-gonic/gin#Context.Cookie) +- [FIX] Don't display color tags if [`func DisableConsoleColor`](https://godoc.org/github.com/gin-gonic/gin#DisableConsoleColor) called, see [#1072](https://github.com/gin-gonic/gin/pull/1072) +- [FIX] Gin Mode `""` when calling [`func Mode`](https://godoc.org/github.com/gin-gonic/gin#Mode) now returns `const DebugMode`, see [#1250](https://github.com/gin-gonic/gin/pull/1250) +- [FIX] `Flush()` now doesn't overwrite `responseWriter` status code, see [#1460](https://github.com/gin-gonic/gin/pull/1460) + +## Gin 1.2.0 - [NEW] Switch from godeps to govendor - [NEW] Add support for Let's Encrypt via gin-gonic/autotls @@ -23,25 +250,25 @@ - [FIX] Use X-Forwarded-For before X-Real-Ip - [FIX] time.Time binding (#904) -### Gin 1.1.4 +## Gin 1.1.4 - [NEW] Support google appengine for IsTerminal func -### Gin 1.1.3 +## Gin 1.1.3 - [FIX] Reverted Logger: skip ANSI color commands -### Gin 1.1 +## Gin 1.1 -- [NEW] Implement QueryArray and PostArray methods -- [NEW] Refactor GetQuery and GetPostForm -- [NEW] Add contribution guide +- [NEW] Implement QueryArray and PostArray methods +- [NEW] Refactor GetQuery and GetPostForm +- [NEW] Add contribution guide - [FIX] Corrected typos in README -- [FIX] Removed additional Iota -- [FIX] Changed imports to gopkg instead of github in README (#733) +- [FIX] Removed additional Iota +- [FIX] Changed imports to gopkg instead of github in README (#733) - [FIX] Logger: skip ANSI color commands if output is not a tty -### Gin 1.0rc2 (...) +## Gin 1.0rc2 (...) - [PERFORMANCE] Fast path for writing Content-Type. - [PERFORMANCE] Much faster 404 routing @@ -76,7 +303,7 @@ - [FIX] MIT license in every file -### Gin 1.0rc1 (May 22, 2015) +## Gin 1.0rc1 (May 22, 2015) - [PERFORMANCE] Zero allocation router - [PERFORMANCE] Faster JSON, XML and text rendering @@ -84,7 +311,7 @@ - [PERFORMANCE] Misc code optimizations. Inlining, tail call optimizations - [NEW] Built-in support for golang.org/x/net/context - [NEW] Any(path, handler). Create a route that matches any path -- [NEW] Refactored rendering pipeline (faster and static typeded) +- [NEW] Refactored rendering pipeline (faster and static typed) - [NEW] Refactored errors API - [NEW] IndentedJSON() prints pretty JSON - [NEW] Added gin.DefaultWriter @@ -120,7 +347,7 @@ - [FIX] Better support for Google App Engine (using log instead of fmt) -### Gin 0.6 (Mar 9, 2015) +## Gin 0.6 (Mar 9, 2015) - [NEW] Support multipart/form-data - [NEW] NoMethod handler @@ -130,14 +357,14 @@ - [FIX] Improve color logger -### Gin 0.5 (Feb 7, 2015) +## Gin 0.5 (Feb 7, 2015) - [NEW] Content Negotiation - [FIX] Solved security bug that allow a client to spoof ip - [FIX] Fix unexported/ignored fields in binding -### Gin 0.4 (Aug 21, 2014) +## Gin 0.4 (Aug 21, 2014) - [NEW] Development mode - [NEW] Unit tests @@ -146,14 +373,14 @@ - [FIX] Improved documentation for model binding -### Gin 0.3 (Jul 18, 2014) +## Gin 0.3 (Jul 18, 2014) - [PERFORMANCE] Normal log and error log are printed in the same call. 
- [PERFORMANCE] Improve performance of NoRouter() - [PERFORMANCE] Improve context's memory locality, reduce CPU cache faults. - [NEW] Flexible rendering API - [NEW] Add Context.File() -- [NEW] Add shorcut RunTLS() for http.ListenAndServeTLS +- [NEW] Add shortcut RunTLS() for http.ListenAndServeTLS - [FIX] Rename NotFound404() to NoRoute() - [FIX] Errors in context are purged - [FIX] Adds HEAD method in Static file serving @@ -164,7 +391,7 @@ - [FIX] Check application/x-www-form-urlencoded when parsing form -### Gin 0.2b (Jul 08, 2014) +## Gin 0.2b (Jul 08, 2014) - [PERFORMANCE] Using sync.Pool to allocatio/gc overhead - [NEW] Travis CI integration - [NEW] Completely new logger @@ -176,14 +403,14 @@ - [NEW] New Bind() and BindWith() methods for parsing request body. - [NEW] Add Content.Copy() - [NEW] Add context.LastError() -- [NEW] Add shorcut for OPTIONS HTTP method +- [NEW] Add shortcut for OPTIONS HTTP method - [FIX] Tons of README fixes - [FIX] Header is written before body - [FIX] BasicAuth() and changes API a little bit - [FIX] Recovery() middleware only prints panics - [FIX] Context.Get() does not panic anymore. Use MustGet() instead. - [FIX] Multiple http.WriteHeader() in NotFound handlers -- [FIX] Engine.Run() panics if http server can't be setted up +- [FIX] Engine.Run() panics if http server can't be set up - [FIX] Crash when route path doesn't start with '/' - [FIX] Do not update header when status code is negative - [FIX] Setting response headers before calling WriteHeader in context.String() diff --git a/vendor/github.com/gin-gonic/gin/CODE_OF_CONDUCT.md b/vendor/github.com/gin-gonic/gin/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..4ea14f395 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 
+ +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at teamgingonic@gmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/gin-gonic/gin/CONTRIBUTING.md b/vendor/github.com/gin-gonic/gin/CONTRIBUTING.md new file mode 100644 index 000000000..97daa808f --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/CONTRIBUTING.md @@ -0,0 +1,13 @@ +## Contributing + +- With issues: + - Use the search tool before opening a new issue. + - Please provide source code and commit sha if you found a bug. + - Review existing issues and provide feedback or react to them. + +- With pull requests: + - Open your pull request against `master` + - Your pull request should have no more than two commits, if not you should squash them. + - It should pass all tests in the available continuous integration systems such as TravisCI. + - You should add/modify tests to cover your proposed code changes. + - If your pull request contains a new feature, please document it on the README. diff --git a/vendor/github.com/gin-gonic/gin/Makefile b/vendor/github.com/gin-gonic/gin/Makefile index 9ba475a43..1a9919395 100644 --- a/vendor/github.com/gin-gonic/gin/Makefile +++ b/vendor/github.com/gin-gonic/gin/Makefile @@ -1,15 +1,32 @@ +GO ?= go GOFMT ?= gofmt "-s" -PACKAGES ?= $(shell go list ./... | grep -v /vendor/) -GOFILES := $(shell find . -name "*.go" -type f -not -path "./vendor/*") - -all: build - -install: deps - govendor sync +PACKAGES ?= $(shell $(GO) list ./...) +VETPACKAGES ?= $(shell $(GO) list ./... | grep -v /examples/) +GOFILES := $(shell find . -name "*.go") +TESTFOLDER := $(shell $(GO) list ./... 
| grep -E 'gin$$|binding$$|render$$' | grep -v examples) +TESTTAGS ?= "" .PHONY: test test: - go test -v -covermode=count -coverprofile=coverage.out + echo "mode: count" > coverage.out + for d in $(TESTFOLDER); do \ + $(GO) test -tags $(TESTTAGS) -v -covermode=count -coverprofile=profile.out $$d > tmp.out; \ + cat tmp.out; \ + if grep -q "^--- FAIL" tmp.out; then \ + rm tmp.out; \ + exit 1; \ + elif grep -q "build failed" tmp.out; then \ + rm tmp.out; \ + exit 1; \ + elif grep -q "setup failed" tmp.out; then \ + rm tmp.out; \ + exit 1; \ + fi; \ + if [ -f profile.out ]; then \ + cat profile.out | grep -v "mode:" >> coverage.out; \ + rm profile.out; \ + fi; \ + done .PHONY: fmt fmt: @@ -17,7 +34,6 @@ fmt: .PHONY: fmt-check fmt-check: - # get all go files and run go fmt on them @diff=$$($(GOFMT) -d $(GOFILES)); \ if [ -n "$$diff" ]; then \ echo "Please run 'make fmt' and commit the result:"; \ @@ -26,36 +42,30 @@ fmt-check: fi; vet: - go vet $(PACKAGES) - -deps: - @hash govendor > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ - go get -u github.com/kardianos/govendor; \ - fi - @hash embedmd > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ - go get -u github.com/campoy/embedmd; \ - fi - -embedmd: - embedmd -d *.md + $(GO) vet $(VETPACKAGES) .PHONY: lint lint: @hash golint > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ - go get -u github.com/golang/lint/golint; \ + $(GO) get -u golang.org/x/lint/golint; \ fi for PKG in $(PACKAGES); do golint -set_exit_status $$PKG || exit 1; done; .PHONY: misspell-check misspell-check: @hash misspell > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ - go get -u github.com/client9/misspell/cmd/misspell; \ + $(GO) get -u github.com/client9/misspell/cmd/misspell; \ fi misspell -error $(GOFILES) .PHONY: misspell misspell: @hash misspell > /dev/null 2>&1; if [ $$? 
-ne 0 ]; then \ - go get -u github.com/client9/misspell/cmd/misspell; \ + $(GO) get -u github.com/client9/misspell/cmd/misspell; \ fi misspell -w $(GOFILES) + +.PHONY: tools +tools: + go install golang.org/x/lint/golint; \ + go install github.com/client9/misspell/cmd/misspell; diff --git a/vendor/github.com/gin-gonic/gin/README.md b/vendor/github.com/gin-gonic/gin/README.md index 029606b71..d4772d764 100644 --- a/vendor/github.com/gin-gonic/gin/README.md +++ b/vendor/github.com/gin-gonic/gin/README.md @@ -1,19 +1,112 @@ # Gin Web Framework - + [![Build Status](https://travis-ci.org/gin-gonic/gin.svg)](https://travis-ci.org/gin-gonic/gin) - [![codecov](https://codecov.io/gh/gin-gonic/gin/branch/master/graph/badge.svg)](https://codecov.io/gh/gin-gonic/gin) - [![Go Report Card](https://goreportcard.com/badge/github.com/gin-gonic/gin)](https://goreportcard.com/report/github.com/gin-gonic/gin) - [![GoDoc](https://godoc.org/github.com/gin-gonic/gin?status.svg)](https://godoc.org/github.com/gin-gonic/gin) - [![Join the chat at https://gitter.im/gin-gonic/gin](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/gin-gonic/gin?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![codecov](https://codecov.io/gh/gin-gonic/gin/branch/master/graph/badge.svg)](https://codecov.io/gh/gin-gonic/gin) +[![Go Report Card](https://goreportcard.com/badge/github.com/gin-gonic/gin)](https://goreportcard.com/report/github.com/gin-gonic/gin) +[![GoDoc](https://pkg.go.dev/badge/github.com/gin-gonic/gin?status.svg)](https://pkg.go.dev/github.com/gin-gonic/gin?tab=doc) +[![Join the chat at https://gitter.im/gin-gonic/gin](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/gin-gonic/gin?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Sourcegraph](https://sourcegraph.com/github.com/gin-gonic/gin/-/badge.svg)](https://sourcegraph.com/github.com/gin-gonic/gin?badge) +[![Open Source Helpers](https://www.codetriage.com/gin-gonic/gin/badges/users.svg)](https://www.codetriage.com/gin-gonic/gin) +[![Release](https://img.shields.io/github/release/gin-gonic/gin.svg?style=flat-square)](https://github.com/gin-gonic/gin/releases) +[![TODOs](https://badgen.net/https/api.tickgit.com/badgen/github.com/gin-gonic/gin)](https://www.tickgit.com/browse?repo=github.com/gin-gonic/gin) + +Gin is a web framework written in Go (Golang). It features a martini-like API with performance that is up to 40 times faster thanks to [httprouter](https://github.com/julienschmidt/httprouter). If you need performance and good productivity, you will love Gin. + + +## Contents + +- [Gin Web Framework](#gin-web-framework) + - [Contents](#contents) + - [Installation](#installation) + - [Quick start](#quick-start) + - [Benchmarks](#benchmarks) + - [Gin v1. 
stable](#gin-v1-stable) + - [Build with jsoniter](#build-with-jsoniter) + - [API Examples](#api-examples) + - [Using GET, POST, PUT, PATCH, DELETE and OPTIONS](#using-get-post-put-patch-delete-and-options) + - [Parameters in path](#parameters-in-path) + - [Querystring parameters](#querystring-parameters) + - [Multipart/Urlencoded Form](#multiparturlencoded-form) + - [Another example: query + post form](#another-example-query--post-form) + - [Map as querystring or postform parameters](#map-as-querystring-or-postform-parameters) + - [Upload files](#upload-files) + - [Single file](#single-file) + - [Multiple files](#multiple-files) + - [Grouping routes](#grouping-routes) + - [Blank Gin without middleware by default](#blank-gin-without-middleware-by-default) + - [Using middleware](#using-middleware) + - [How to write log file](#how-to-write-log-file) + - [Custom Log Format](#custom-log-format) + - [Controlling Log output coloring](#controlling-log-output-coloring) + - [Model binding and validation](#model-binding-and-validation) + - [Custom Validators](#custom-validators) + - [Only Bind Query String](#only-bind-query-string) + - [Bind Query String or Post Data](#bind-query-string-or-post-data) + - [Bind Uri](#bind-uri) + - [Bind Header](#bind-header) + - [Bind HTML checkboxes](#bind-html-checkboxes) + - [Multipart/Urlencoded binding](#multiparturlencoded-binding) + - [XML, JSON, YAML and ProtoBuf rendering](#xml-json-yaml-and-protobuf-rendering) + - [SecureJSON](#securejson) + - [JSONP](#jsonp) + - [AsciiJSON](#asciijson) + - [PureJSON](#purejson) + - [Serving static files](#serving-static-files) + - [Serving data from file](#serving-data-from-file) + - [Serving data from reader](#serving-data-from-reader) + - [HTML rendering](#html-rendering) + - [Custom Template renderer](#custom-template-renderer) + - [Custom Delimiters](#custom-delimiters) + - [Custom Template Funcs](#custom-template-funcs) + - [Multitemplate](#multitemplate) + - [Redirects](#redirects) + - [Custom Middleware](#custom-middleware) + - [Using BasicAuth() middleware](#using-basicauth-middleware) + - [Goroutines inside a middleware](#goroutines-inside-a-middleware) + - [Custom HTTP configuration](#custom-http-configuration) + - [Support Let's Encrypt](#support-lets-encrypt) + - [Run multiple service using Gin](#run-multiple-service-using-gin) + - [Graceful shutdown or restart](#graceful-shutdown-or-restart) + - [Third-party packages](#third-party-packages) + - [Manually](#manually) + - [Build a single binary with templates](#build-a-single-binary-with-templates) + - [Bind form-data request with custom struct](#bind-form-data-request-with-custom-struct) + - [Try to bind body into different structs](#try-to-bind-body-into-different-structs) + - [http2 server push](#http2-server-push) + - [Define format for the log of routes](#define-format-for-the-log-of-routes) + - [Set and get a cookie](#set-and-get-a-cookie) + - [Testing](#testing) + - [Users](#users) + +## Installation + +To install Gin package, you need to install Go and set your Go workspace first. + +1. The first need [Go](https://golang.org/) installed (**version 1.12+ is required**), then you can use the below Go command to install Gin. -Gin is a web framework written in Go (Golang). It features a martini-like API with much better performance, up to 40 times faster thanks to [httprouter](https://github.com/julienschmidt/httprouter). If you need performance and good productivity, you will love Gin. +```sh +$ go get -u github.com/gin-gonic/gin +``` + +2. 
Import it in your code: + +```go +import "github.com/gin-gonic/gin" +``` + +3. (Optional) Import `net/http`. This is required for example if using constants such as `http.StatusOK`. + +```go +import "net/http" +``` -![Gin console logger](https://gin-gonic.github.io/gin/other/console.png) +## Quick start ```sh -$ cat test.go +# assume the following codes in example.go file +$ cat example.go ``` ```go @@ -28,90 +121,83 @@ func main() { "message": "pong", }) }) - r.Run() // listen and serve on 0.0.0.0:8080 + r.Run() // listen and serve on 0.0.0.0:8080 (for windows "localhost:8080") } ``` +``` +# run example.go and visit 0.0.0.0:8080/ping (for windows "localhost:8080/ping") on browser +$ go run example.go +``` + ## Benchmarks -Gin uses a custom version of [HttpRouter](https://github.com/julienschmidt/httprouter) +Gin uses a custom version of [HttpRouter](https://github.com/julienschmidt/httprouter) [See all benchmarks](/BENCHMARKS.md) - -Benchmark name | (1) | (2) | (3) | (4) ---------------------------------|----------:|----------:|----------:|------: -BenchmarkAce_GithubAll | 10000 | 109482 | 13792 | 167 -BenchmarkBear_GithubAll | 10000 | 287490 | 79952 | 943 -BenchmarkBeego_GithubAll | 3000 | 562184 | 146272 | 2092 -BenchmarkBone_GithubAll | 500 | 2578716 | 648016 | 8119 -BenchmarkDenco_GithubAll | 20000 | 94955 | 20224 | 167 -BenchmarkEcho_GithubAll | 30000 | 58705 | 0 | 0 -**BenchmarkGin_GithubAll** | **30000** | **50991** | **0** | **0** -BenchmarkGocraftWeb_GithubAll | 5000 | 449648 | 133280 | 1889 -BenchmarkGoji_GithubAll | 2000 | 689748 | 56113 | 334 -BenchmarkGoJsonRest_GithubAll | 5000 | 537769 | 135995 | 2940 -BenchmarkGoRestful_GithubAll | 100 | 18410628 | 797236 | 7725 -BenchmarkGorillaMux_GithubAll | 200 | 8036360 | 153137 | 1791 -BenchmarkHttpRouter_GithubAll | 20000 | 63506 | 13792 | 167 -BenchmarkHttpTreeMux_GithubAll | 10000 | 165927 | 56112 | 334 -BenchmarkKocha_GithubAll | 10000 | 171362 | 23304 | 843 -BenchmarkMacaron_GithubAll | 2000 | 817008 | 224960 | 2315 -BenchmarkMartini_GithubAll | 100 | 12609209 | 237952 | 2686 -BenchmarkPat_GithubAll | 300 | 4830398 | 1504101 | 32222 -BenchmarkPossum_GithubAll | 10000 | 301716 | 97440 | 812 -BenchmarkR2router_GithubAll | 10000 | 270691 | 77328 | 1182 -BenchmarkRevel_GithubAll | 1000 | 1491919 | 345553 | 5918 -BenchmarkRivet_GithubAll | 10000 | 283860 | 84272 | 1079 -BenchmarkTango_GithubAll | 5000 | 473821 | 87078 | 2470 -BenchmarkTigerTonic_GithubAll | 2000 | 1120131 | 241088 | 6052 -BenchmarkTraffic_GithubAll | 200 | 8708979 | 2664762 | 22390 -BenchmarkVulcan_GithubAll | 5000 | 353392 | 19894 | 609 -BenchmarkZeus_GithubAll | 2000 | 944234 | 300688 | 2648 - -(1): Total Repetitions -(2): Single Repetition Duration (ns/op) -(3): Heap Memory (B/op) -(4): Average Allocations per Repetition (allocs/op) +| Benchmark name | (1) | (2) | (3) | (4) | +| ------------------------------ | ---------:| ---------------:| ------------:| ---------------:| +| BenchmarkGin_GithubAll | **43550** | **27364 ns/op** | **0 B/op** | **0 allocs/op** | +| BenchmarkAce_GithubAll | 40543 | 29670 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAero_GithubAll | 57632 | 20648 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBear_GithubAll | 9234 | 216179 ns/op | 86448 B/op | 943 allocs/op | +| BenchmarkBeego_GithubAll | 7407 | 243496 ns/op | 71456 B/op | 609 allocs/op | +| BenchmarkBone_GithubAll | 420 | 2922835 ns/op | 720160 B/op | 8620 allocs/op | +| BenchmarkChi_GithubAll | 7620 | 238331 ns/op | 87696 B/op | 609 allocs/op | +| BenchmarkDenco_GithubAll | 18355 | 64494 
ns/op | 20224 B/op | 167 allocs/op | +| BenchmarkEcho_GithubAll | 31251 | 38479 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkGocraftWeb_GithubAll | 4117 | 300062 ns/op | 131656 B/op | 1686 allocs/op | +| BenchmarkGoji_GithubAll | 3274 | 416158 ns/op | 56112 B/op | 334 allocs/op | +| BenchmarkGojiv2_GithubAll | 1402 | 870518 ns/op | 352720 B/op | 4321 allocs/op | +| BenchmarkGoJsonRest_GithubAll | 2976 | 401507 ns/op | 134371 B/op | 2737 allocs/op | +| BenchmarkGoRestful_GithubAll | 410 | 2913158 ns/op | 910144 B/op | 2938 allocs/op | +| BenchmarkGorillaMux_GithubAll | 346 | 3384987 ns/op | 251650 B/op | 1994 allocs/op | +| BenchmarkGowwwRouter_GithubAll | 10000 | 143025 ns/op | 72144 B/op | 501 allocs/op | +| BenchmarkHttpRouter_GithubAll | 55938 | 21360 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHttpTreeMux_GithubAll | 10000 | 153944 ns/op | 65856 B/op | 671 allocs/op | +| BenchmarkKocha_GithubAll | 10000 | 106315 ns/op | 23304 B/op | 843 allocs/op | +| BenchmarkLARS_GithubAll | 47779 | 25084 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMacaron_GithubAll | 3266 | 371907 ns/op | 149409 B/op | 1624 allocs/op | +| BenchmarkMartini_GithubAll | 331 | 3444706 ns/op | 226551 B/op | 2325 allocs/op | +| BenchmarkPat_GithubAll | 273 | 4381818 ns/op | 1483152 B/op | 26963 allocs/op | +| BenchmarkPossum_GithubAll | 10000 | 164367 ns/op | 84448 B/op | 609 allocs/op | +| BenchmarkR2router_GithubAll | 10000 | 160220 ns/op | 77328 B/op | 979 allocs/op | +| BenchmarkRivet_GithubAll | 14625 | 82453 ns/op | 16272 B/op | 167 allocs/op | +| BenchmarkTango_GithubAll | 6255 | 279611 ns/op | 63826 B/op | 1618 allocs/op | +| BenchmarkTigerTonic_GithubAll | 2008 | 687874 ns/op | 193856 B/op | 4474 allocs/op | +| BenchmarkTraffic_GithubAll | 355 | 3478508 ns/op | 820744 B/op | 14114 allocs/op | +| BenchmarkVulcan_GithubAll | 6885 | 193333 ns/op | 19894 B/op | 609 allocs/op | + +- (1): Total Repetitions achieved in constant time, higher means more confident result +- (2): Single Repetition Duration (ns/op), lower is better +- (3): Heap Memory (B/op), lower is better +- (4): Average Allocations per Repetition (allocs/op), lower is better ## Gin v1. stable - [x] Zero allocation router. - [x] Still the fastest http router and framework. From routing to writing. -- [x] Complete suite of unit tests -- [x] Battle tested +- [x] Complete suite of unit tests. +- [x] Battle tested. - [x] API frozen, new releases will not break your code. +## Build with [jsoniter](https://github.com/json-iterator/go) -## Start using it - -1. Download and install it: +Gin uses `encoding/json` as default json package but you can change to [jsoniter](https://github.com/json-iterator/go) by build from other tags. ```sh -$ go get github.com/gin-gonic/gin -``` - -2. Import it in your code: - -```go -import "github.com/gin-gonic/gin" -``` - -3. (Optional) Import `net/http`. This is required for example if using constants such as `http.StatusOK`. - -```go -import "net/http" +$ go build -tags=jsoniter . ``` ## API Examples +You can find a number of ready-to-run examples at [Gin examples repository](https://github.com/gin-gonic/examples). 
+ ### Using GET, POST, PUT, PATCH, DELETE and OPTIONS ```go func main() { - // Disable Console Color - // gin.DisableConsoleColor() - // Creates a gin router with default middleware: // logger and recovery (crash-free) middleware router := gin.Default() @@ -137,7 +223,7 @@ func main() { func main() { router := gin.Default() - // This handler will match /user/john but will not match neither /user/ or /user + // This handler will match /user/john but will not match /user/ or /user router.GET("/user/:name", func(c *gin.Context) { name := c.Param("name") c.String(http.StatusOK, "Hello %s", name) @@ -152,6 +238,18 @@ func main() { c.String(http.StatusOK, message) }) + // For each matched request Context will hold the route definition + router.POST("/user/:name/*action", func(c *gin.Context) { + c.FullPath() == "/user/:name/*action" // true + }) + + // This handler will add a new router for /user/groups. + // Exact routes are resolved before param routes, regardless of the order they were defined. + // Routes starting with /user/groups are never interpreted as /user/:name/... routes + router.GET("/user/groups", func(c *gin.Context) { + c.String(http.StatusOK, "The available groups are [...]", name) + }) + router.Run(":8080") } ``` @@ -224,21 +322,58 @@ func main() { id: 1234; page: 1; name: manu; message: this_is_great ``` +### Map as querystring or postform parameters + +``` +POST /post?ids[a]=1234&ids[b]=hello HTTP/1.1 +Content-Type: application/x-www-form-urlencoded + +names[first]=thinkerou&names[second]=tianou +``` + +```go +func main() { + router := gin.Default() + + router.POST("/post", func(c *gin.Context) { + + ids := c.QueryMap("ids") + names := c.PostFormMap("names") + + fmt.Printf("ids: %v; names: %v", ids, names) + }) + router.Run(":8080") +} +``` + +``` +ids: map[b:hello a:1234]; names: map[second:tianou first:thinkerou] +``` + ### Upload files #### Single file -References issue [#774](https://github.com/gin-gonic/gin/issues/774) and detail [example code](examples/upload-file/single). +References issue [#774](https://github.com/gin-gonic/gin/issues/774) and detail [example code](https://github.com/gin-gonic/examples/tree/master/upload-file/single). + +`file.Filename` **SHOULD NOT** be trusted. See [`Content-Disposition` on MDN](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition#Directives) and [#1693](https://github.com/gin-gonic/gin/issues/1693) + +> The filename is always optional and must not be used blindly by the application: path information should be stripped, and conversion to the server file system rules should be done. ```go func main() { router := gin.Default() + // Set a lower memory limit for multipart forms (default is 32 MiB) + router.MaxMultipartMemory = 8 << 20 // 8 MiB router.POST("/upload", func(c *gin.Context) { // single file file, _ := c.FormFile("file") log.Println(file.Filename) - c.String(http.StatusOK, fmt.Printf("'%s' uploaded!", file.Filename)) + // Upload the file to specific dst. + c.SaveUploadedFile(file, dst) + + c.String(http.StatusOK, fmt.Sprintf("'%s' uploaded!", file.Filename)) }) router.Run(":8080") } @@ -254,11 +389,13 @@ curl -X POST http://localhost:8080/upload \ #### Multiple files -See the detail [example code](examples/upload-file/multiple). +See the detail [example code](https://github.com/gin-gonic/examples/tree/master/upload-file/multiple). 
```go func main() { router := gin.Default() + // Set a lower memory limit for multipart forms (default is 32 MiB) + router.MaxMultipartMemory = 8 << 20 // 8 MiB router.POST("/upload", func(c *gin.Context) { // Multipart form form, _ := c.MultipartForm() @@ -266,8 +403,11 @@ func main() { for _, file := range files { log.Println(file.Filename) + + // Upload the file to specific dst. + c.SaveUploadedFile(file, dst) } - c.String(http.StatusOK, fmt.Printf("%d files uploaded!", len(files))) + c.String(http.StatusOK, fmt.Sprintf("%d files uploaded!", len(files))) }) router.Run(":8080") } @@ -319,6 +459,7 @@ r := gin.New() instead of ```go +// Default With the Logger and Recovery middleware already attached r := gin.Default() ``` @@ -330,7 +471,11 @@ func main() { r := gin.New() // Global middleware + // Logger middleware will write the logs to gin.DefaultWriter even if you set with GIN_MODE=release. + // By default gin.DefaultWriter = os.Stdout r.Use(gin.Logger()) + + // Recovery middleware recovers from any panics and writes a 500 if there was one. r.Use(gin.Recovery()) // Per route middleware, you can add as many as you desire. @@ -358,21 +503,165 @@ func main() { } ``` +### Custom Recovery behavior +```go +func main() { + // Creates a router without any middleware by default + r := gin.New() + + // Global middleware + // Logger middleware will write the logs to gin.DefaultWriter even if you set with GIN_MODE=release. + // By default gin.DefaultWriter = os.Stdout + r.Use(gin.Logger()) + + // Recovery middleware recovers from any panics and writes a 500 if there was one. + r.Use(gin.CustomRecovery(func(c *gin.Context, recovered interface{}) { + if err, ok := recovered.(string); ok { + c.String(http.StatusInternalServerError, fmt.Sprintf("error: %s", err)) + } + c.AbortWithStatus(http.StatusInternalServerError) + })) + + r.GET("/panic", func(c *gin.Context) { + // panic with a string -- the custom middleware could save this to a database or report it to the user + panic("foo") + }) + + r.GET("/", func(c *gin.Context) { + c.String(http.StatusOK, "ohai") + }) + + // Listen and serve on 0.0.0.0:8080 + r.Run(":8080") +} +``` + +### How to write log file +```go +func main() { + // Disable Console Color, you don't need console color when writing the logs to file. + gin.DisableConsoleColor() + + // Logging to a file. + f, _ := os.Create("gin.log") + gin.DefaultWriter = io.MultiWriter(f) + + // Use the following code if you need to write the logs to file and console at the same time. 
+ // gin.DefaultWriter = io.MultiWriter(f, os.Stdout) + + router := gin.Default() + router.GET("/ping", func(c *gin.Context) { + c.String(200, "pong") + }) + +    router.Run(":8080") +} +``` + +### Custom Log Format +```go +func main() { + router := gin.New() + + // LoggerWithFormatter middleware will write the logs to gin.DefaultWriter + // By default gin.DefaultWriter = os.Stdout + router.Use(gin.LoggerWithFormatter(func(param gin.LogFormatterParams) string { + + // your custom format + return fmt.Sprintf("%s - [%s] \"%s %s %s %d %s \"%s\" %s\"\n", + param.ClientIP, + param.TimeStamp.Format(time.RFC1123), + param.Method, + param.Path, + param.Request.Proto, + param.StatusCode, + param.Latency, + param.Request.UserAgent(), + param.ErrorMessage, + ) + })) + router.Use(gin.Recovery()) + + router.GET("/ping", func(c *gin.Context) { + c.String(200, "pong") + }) + + router.Run(":8080") +} +``` + +**Sample Output** +``` +::1 - [Fri, 07 Dec 2018 17:04:38 JST] "GET /ping HTTP/1.1 200 122.767µs "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36" " +``` + +### Controlling Log output coloring + +By default, logs output on console should be colorized depending on the detected TTY. + +Never colorize logs: + +```go +func main() { + // Disable log's color + gin.DisableConsoleColor() + + // Creates a gin router with default middleware: + // logger and recovery (crash-free) middleware + router := gin.Default() + + router.GET("/ping", func(c *gin.Context) { + c.String(200, "pong") + }) + + router.Run(":8080") +} +``` + +Always colorize logs: + +```go +func main() { + // Force log's color + gin.ForceConsoleColor() + + // Creates a gin router with default middleware: + // logger and recovery (crash-free) middleware + router := gin.Default() + + router.GET("/ping", func(c *gin.Context) { + c.String(200, "pong") + }) + + router.Run(":8080") +} +``` + ### Model binding and validation -To bind a request body into a type, use model binding. We currently support binding of JSON, XML and standard form values (foo=bar&boo=baz). +To bind a request body into a type, use model binding. We currently support binding of JSON, XML, YAML and standard form values (foo=bar&boo=baz). + +Gin uses [**go-playground/validator/v10**](https://github.com/go-playground/validator) for validation. Check the full docs on tags usage [here](https://godoc.org/github.com/go-playground/validator#hdr-Baked_In_Validators_and_Tags). Note that you need to set the corresponding binding tag on all fields you want to bind. For example, when binding from JSON, set `json:"fieldname"`. -When using the Bind-method, Gin tries to infer the binder depending on the Content-Type header. If you are sure what you are binding, you can use BindWith. +Also, Gin provides two sets of methods for binding: +- **Type** - Must bind + - **Methods** - `Bind`, `BindJSON`, `BindXML`, `BindQuery`, `BindYAML`, `BindHeader` + - **Behavior** - These methods use `MustBindWith` under the hood. If there is a binding error, the request is aborted with `c.AbortWithError(400, err).SetType(ErrorTypeBind)`. This sets the response status code to 400 and the `Content-Type` header is set to `text/plain; charset=utf-8`. Note that if you try to set the response code after this, it will result in a warning `[GIN-debug] [WARNING] Headers were already written. Wanted to override status code 400 with 422`. If you wish to have greater control over the behavior, consider using the `ShouldBind` equivalent method. 
+- **Type** - Should bind + - **Methods** - `ShouldBind`, `ShouldBindJSON`, `ShouldBindXML`, `ShouldBindQuery`, `ShouldBindYAML`, `ShouldBindHeader` + - **Behavior** - These methods use `ShouldBindWith` under the hood. If there is a binding error, the error is returned and it is the developer's responsibility to handle the request and error appropriately. + +When using the Bind-method, Gin tries to infer the binder depending on the Content-Type header. If you are sure what you are binding, you can use `MustBindWith` or `ShouldBindWith`. -You can also specify that specific fields are required. If a field is decorated with `binding:"required"` and has a empty value when binding, the current request will fail with an error. +You can also specify that specific fields are required. If a field is decorated with `binding:"required"` and has a empty value when binding, an error will be returned. ```go // Binding from JSON type Login struct { - User string `form:"user" json:"user" binding:"required"` - Password string `form:"password" json:"password" binding:"required"` + User string `form:"user" json:"user" xml:"user" binding:"required"` + Password string `form:"password" json:"password" xml:"password" binding:"required"` } func main() { @@ -381,26 +670,55 @@ func main() { // Example for binding JSON ({"user": "manu", "password": "123"}) router.POST("/loginJSON", func(c *gin.Context) { var json Login - if c.BindJSON(&json) == nil { - if json.User == "manu" && json.Password == "123" { - c.JSON(http.StatusOK, gin.H{"status": "you are logged in"}) - } else { - c.JSON(http.StatusUnauthorized, gin.H{"status": "unauthorized"}) - } + if err := c.ShouldBindJSON(&json); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + if json.User != "manu" || json.Password != "123" { + c.JSON(http.StatusUnauthorized, gin.H{"status": "unauthorized"}) + return + } + + c.JSON(http.StatusOK, gin.H{"status": "you are logged in"}) + }) + + // Example for binding XML ( + // + // + // user + // 123 + // ) + router.POST("/loginXML", func(c *gin.Context) { + var xml Login + if err := c.ShouldBindXML(&xml); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return } + + if xml.User != "manu" || xml.Password != "123" { + c.JSON(http.StatusUnauthorized, gin.H{"status": "unauthorized"}) + return + } + + c.JSON(http.StatusOK, gin.H{"status": "you are logged in"}) }) // Example for binding a HTML form (user=manu&password=123) router.POST("/loginForm", func(c *gin.Context) { var form Login // This will infer what binder to use depending on the content-type header. 
- if c.Bind(&form) == nil { - if form.User == "manu" && form.Password == "123" { - c.JSON(http.StatusOK, gin.H{"status": "you are logged in"}) - } else { - c.JSON(http.StatusUnauthorized, gin.H{"status": "unauthorized"}) - } + if err := c.ShouldBind(&form); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + if form.User != "manu" || form.Password != "123" { + c.JSON(http.StatusUnauthorized, gin.H{"status": "unauthorized"}) + return } + + c.JSON(http.StatusOK, gin.H{"status": "you are logged in"}) }) // Listen and serve on 0.0.0.0:8080 @@ -408,15 +726,112 @@ func main() { } ``` -### Bind Query String +**Sample request** +```shell +$ curl -v -X POST \ + http://localhost:8080/loginJSON \ + -H 'content-type: application/json' \ + -d '{ "user": "manu" }' +> POST /loginJSON HTTP/1.1 +> Host: localhost:8080 +> User-Agent: curl/7.51.0 +> Accept: */* +> content-type: application/json +> Content-Length: 18 +> +* upload completely sent off: 18 out of 18 bytes +< HTTP/1.1 400 Bad Request +< Content-Type: application/json; charset=utf-8 +< Date: Fri, 04 Aug 2017 03:51:31 GMT +< Content-Length: 100 +< +{"error":"Key: 'Login.Password' Error:Field validation for 'Password' failed on the 'required' tag"} +``` -See the [detail information](https://github.com/gin-gonic/gin/issues/742#issuecomment-264681292). +**Skip validate** + +When running the above example using the above the `curl` command, it returns error. Because the example use `binding:"required"` for `Password`. If use `binding:"-"` for `Password`, then it will not return error when running the above example again. + +### Custom Validators + +It is also possible to register custom validators. See the [example code](https://github.com/gin-gonic/examples/tree/master/custom-validation/server.go). ```go package main -import "log" -import "github.com/gin-gonic/gin" +import ( + "net/http" + "time" + + "github.com/gin-gonic/gin" + "github.com/gin-gonic/gin/binding" + "github.com/go-playground/validator/v10" +) + +// Booking contains binded and validated data. 
+type Booking struct { + CheckIn time.Time `form:"check_in" binding:"required,bookabledate" time_format:"2006-01-02"` + CheckOut time.Time `form:"check_out" binding:"required,gtfield=CheckIn" time_format:"2006-01-02"` +} + +var bookableDate validator.Func = func(fl validator.FieldLevel) bool { + date, ok := fl.Field().Interface().(time.Time) + if ok { + today := time.Now() + if today.After(date) { + return false + } + } + return true +} + +func main() { + route := gin.Default() + + if v, ok := binding.Validator.Engine().(*validator.Validate); ok { + v.RegisterValidation("bookabledate", bookableDate) + } + + route.GET("/bookable", getBookable) + route.Run(":8085") +} + +func getBookable(c *gin.Context) { + var b Booking + if err := c.ShouldBindWith(&b, binding.Query); err == nil { + c.JSON(http.StatusOK, gin.H{"message": "Booking dates are valid!"}) + } else { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + } +} +``` + +```console +$ curl "localhost:8085/bookable?check_in=2030-04-16&check_out=2030-04-17" +{"message":"Booking dates are valid!"} + +$ curl "localhost:8085/bookable?check_in=2030-03-10&check_out=2030-03-09" +{"error":"Key: 'Booking.CheckOut' Error:Field validation for 'CheckOut' failed on the 'gtfield' tag"} + +$ curl "localhost:8085/bookable?check_in=2000-03-09&check_out=2000-03-10" +{"error":"Key: 'Booking.CheckIn' Error:Field validation for 'CheckIn' failed on the 'bookabledate' tag"}% +``` + +[Struct level validations](https://github.com/go-playground/validator/releases/tag/v8.7) can also be registered this way. +See the [struct-lvl-validation example](https://github.com/gin-gonic/examples/tree/master/struct-lvl-validations) to learn more. + +### Only Bind Query String + +`ShouldBindQuery` function only binds the query params and not the post data. See the [detail information](https://github.com/gin-gonic/gin/issues/742#issuecomment-315953017). + +```go +package main + +import ( + "log" + + "github.com/gin-gonic/gin" +) type Person struct { Name string `form:"name"` @@ -425,53 +840,222 @@ type Person struct { func main() { route := gin.Default() - route.GET("/testing", startPage) + route.Any("/testing", startPage) route.Run(":8085") } func startPage(c *gin.Context) { var person Person - // If `GET`, only `Form` binding engine (`query`) used. - // If `POST`, first checks the `content-type` for `JSON` or `XML`, then uses `Form` (`form-data`). - // See more at https://github.com/gin-gonic/gin/blob/develop/binding/binding.go#L45 - if c.Bind(&person) == nil { + if c.ShouldBindQuery(&person) == nil { + log.Println("====== Only Bind By Query String ======") log.Println(person.Name) log.Println(person.Address) } + c.String(200, "Success") +} + +``` + +### Bind Query String or Post Data + +See the [detail information](https://github.com/gin-gonic/gin/issues/742#issuecomment-264681292). + +```go +package main + +import ( + "log" + "time" + + "github.com/gin-gonic/gin" +) + +type Person struct { + Name string `form:"name"` + Address string `form:"address"` + Birthday time.Time `form:"birthday" time_format:"2006-01-02" time_utc:"1"` + CreateTime time.Time `form:"createTime" time_format:"unixNano"` + UnixTime time.Time `form:"unixTime" time_format:"unix"` +} + +func main() { + route := gin.Default() + route.GET("/testing", startPage) + route.Run(":8085") +} + +func startPage(c *gin.Context) { + var person Person + // If `GET`, only `Form` binding engine (`query`) used. + // If `POST`, first checks the `content-type` for `JSON` or `XML`, then uses `Form` (`form-data`). 
+ // See more at https://github.com/gin-gonic/gin/blob/master/binding/binding.go#L48 + if c.ShouldBind(&person) == nil { + log.Println(person.Name) + log.Println(person.Address) + log.Println(person.Birthday) + log.Println(person.CreateTime) + log.Println(person.UnixTime) + } c.String(200, "Success") } ``` -### Multipart/Urlencoded binding +Test it with: +```sh +$ curl -X GET "localhost:8085/testing?name=appleboy&address=xyz&birthday=1992-03-15&createTime=1562400033000000123&unixTime=1562400033" +``` + +### Bind Uri + +See the [detail information](https://github.com/gin-gonic/gin/issues/846). + +```go +package main + +import "github.com/gin-gonic/gin" + +type Person struct { + ID string `uri:"id" binding:"required,uuid"` + Name string `uri:"name" binding:"required"` +} + +func main() { + route := gin.Default() + route.GET("/:name/:id", func(c *gin.Context) { + var person Person + if err := c.ShouldBindUri(&person); err != nil { + c.JSON(400, gin.H{"msg": err}) + return + } + c.JSON(200, gin.H{"name": person.Name, "uuid": person.ID}) + }) + route.Run(":8088") +} +``` + +Test it with: +```sh +$ curl -v localhost:8088/thinkerou/987fbc97-4bed-5078-9f07-9141ba07c9f3 +$ curl -v localhost:8088/thinkerou/not-uuid +``` + +### Bind Header ```go package main import ( + "fmt" "github.com/gin-gonic/gin" ) -type LoginForm struct { - User string `form:"user" binding:"required"` - Password string `form:"password" binding:"required"` +type testHeader struct { + Rate int `header:"Rate"` + Domain string `header:"Domain"` +} + +func main() { + r := gin.Default() + r.GET("/", func(c *gin.Context) { + h := testHeader{} + + if err := c.ShouldBindHeader(&h); err != nil { + c.JSON(200, err) + } + + fmt.Printf("%#v\n", h) + c.JSON(200, gin.H{"Rate": h.Rate, "Domain": h.Domain}) + }) + + r.Run() + +// client +// curl -H "rate:300" -H "domain:music" 127.0.0.1:8080/ +// output +// {"Domain":"music","Rate":300} +} +``` + +### Bind HTML checkboxes + +See the [detail information](https://github.com/gin-gonic/gin/issues/129#issuecomment-124260092) + +main.go + +```go +... + +type myForm struct { + Colors []string `form:"colors[]"` +} + +... + +func formHandler(c *gin.Context) { + var fakeForm myForm + c.ShouldBind(&fakeForm) + c.JSON(200, gin.H{"color": fakeForm.Colors}) +} + +... + +``` + +form.html + +```html +
+<form action="/" method="POST">
+    <p>Check some colors</p>
+    <label for="red">Red</label>
+    <input type="checkbox" name="colors[]" value="red" id="red">
+    <label for="green">Green</label>
+    <input type="checkbox" name="colors[]" value="green" id="green">
+    <label for="blue">Blue</label>
+    <input type="checkbox" name="colors[]" value="blue" id="blue">
+    <input type="submit">
+</form>
+``` + +result: + +``` +{"color":["red","green","blue"]} +``` + +### Multipart/Urlencoded binding + +```go +type ProfileForm struct { + Name string `form:"name" binding:"required"` + Avatar *multipart.FileHeader `form:"avatar" binding:"required"` + + // or for multiple files + // Avatars []*multipart.FileHeader `form:"avatar" binding:"required"` } func main() { router := gin.Default() - router.POST("/login", func(c *gin.Context) { + router.POST("/profile", func(c *gin.Context) { // you can bind multipart form with explicit binding declaration: - // c.MustBindWith(&form, binding.Form) - // or you can simply use autobinding with Bind method: - var form LoginForm + // c.ShouldBindWith(&form, binding.Form) + // or you can simply use autobinding with ShouldBind method: + var form ProfileForm // in this case proper binding will be automatically selected - if c.Bind(&form) == nil { - if form.User == "user" && form.Password == "password" { - c.JSON(200, gin.H{"status": "you are logged in"}) - } else { - c.JSON(401, gin.H{"status": "unauthorized"}) - } + if err := c.ShouldBind(&form); err != nil { + c.String(http.StatusBadRequest, "bad request") + return + } + + err := c.SaveUploadedFile(form.Avatar, form.Avatar.Filename) + if err != nil { + c.String(http.StatusInternalServerError, "unknown error") + return } + + // db.Save(&form) + + c.String(http.StatusOK, "ok") }) router.Run(":8080") } @@ -479,10 +1063,10 @@ func main() { Test it with: ```sh -$ curl -v --form user=user --form password=password http://localhost:8080/login +$ curl -X POST -v --form name=user --form "avatar=@./avatar.png" http://localhost:8080/profile ``` -### XML, JSON and YAML rendering +### XML, JSON, YAML and ProtoBuf rendering ```go func main() { @@ -516,21 +1100,178 @@ func main() { c.YAML(http.StatusOK, gin.H{"message": "hey", "status": http.StatusOK}) }) + r.GET("/someProtoBuf", func(c *gin.Context) { + reps := []int64{int64(1), int64(2)} + label := "test" + // The specific definition of protobuf is written in the testdata/protoexample file. + data := &protoexample.Test{ + Label: &label, + Reps: reps, + } + // Note that data becomes binary data in the response + // Will output protoexample.Test protobuf serialized data + c.ProtoBuf(http.StatusOK, data) + }) + + // Listen and serve on 0.0.0.0:8080 + r.Run(":8080") +} +``` + +#### SecureJSON + +Using SecureJSON to prevent json hijacking. Default prepends `"while(1),"` to response body if the given struct is array values. + +```go +func main() { + r := gin.Default() + + // You can also use your own secure json prefix + // r.SecureJsonPrefix(")]}',\n") + + r.GET("/someJSON", func(c *gin.Context) { + names := []string{"lena", "austin", "foo"} + + // Will output : while(1);["lena","austin","foo"] + c.SecureJSON(http.StatusOK, names) + }) + // Listen and serve on 0.0.0.0:8080 r.Run(":8080") } +``` +#### JSONP + +Using JSONP to request data from a server in a different domain. Add callback to response body if the query parameter callback exists. + +```go +func main() { + r := gin.Default() + + r.GET("/JSONP", func(c *gin.Context) { + data := gin.H{ + "foo": "bar", + } + + //callback is x + // Will output : x({\"foo\":\"bar\"}) + c.JSONP(http.StatusOK, data) + }) + + // Listen and serve on 0.0.0.0:8080 + r.Run(":8080") + + // client + // curl http://127.0.0.1:8080/JSONP?callback=x +} +``` + +#### AsciiJSON + +Using AsciiJSON to Generates ASCII-only JSON with escaped non-ASCII characters. 
+ +```go +func main() { + r := gin.Default() + + r.GET("/someJSON", func(c *gin.Context) { + data := gin.H{ + "lang": "GO语言", + "tag": "<br>
", + } + + // will output : {"lang":"GO\u8bed\u8a00","tag":"\u003cbr\u003e"} + c.AsciiJSON(http.StatusOK, data) + }) + + // Listen and serve on 0.0.0.0:8080 + r.Run(":8080") +} +``` + +#### PureJSON + +Normally, JSON replaces special HTML characters with their unicode entities, e.g. `<` becomes `\u003c`. If you want to encode such characters literally, you can use PureJSON instead. +This feature is unavailable in Go 1.6 and lower. + +```go +func main() { + r := gin.Default() + + // Serves unicode entities + r.GET("/json", func(c *gin.Context) { + c.JSON(200, gin.H{ + "html": "Hello, world!", + }) + }) + + // Serves literal characters + r.GET("/purejson", func(c *gin.Context) { + c.PureJSON(200, gin.H{ + "html": "Hello, world!", + }) + }) + + // listen and serve on 0.0.0.0:8080 + r.Run(":8080") +} +``` + +### Serving static files + +```go +func main() { + router := gin.Default() + router.Static("/assets", "./assets") + router.StaticFS("/more_static", http.Dir("my_file_system")) + router.StaticFile("/favicon.ico", "./resources/favicon.ico") + + // Listen and serve on 0.0.0.0:8080 + router.Run(":8080") +} +``` + +### Serving data from file + +```go +func main() { + router := gin.Default() + + router.GET("/local/file", func(c *gin.Context) { + c.File("local/file.go") + }) + + var fs http.FileSystem = // ... + router.GET("/fs/file", func(c *gin.Context) { + c.FileFromFS("fs/file.go", fs) + }) +} + ``` -### Serving static files +### Serving data from reader ```go func main() { router := gin.Default() - router.Static("/assets", "./assets") - router.StaticFS("/more_static", http.Dir("my_file_system")) - router.StaticFile("/favicon.ico", "./resources/favicon.ico") + router.GET("/someDataFromReader", func(c *gin.Context) { + response, err := http.Get("https://raw.githubusercontent.com/gin-gonic/logo/master/color.png") + if err != nil || response.StatusCode != http.StatusOK { + c.Status(http.StatusServiceUnavailable) + return + } - // Listen and serve on 0.0.0.0:8080 + reader := response.Body + defer reader.Close() + contentLength := response.ContentLength + contentType := response.Header.Get("Content-Type") + + extraHeaders := map[string]string{ + "Content-Disposition": `attachment; filename="gopher.png"`, + } + + c.DataFromReader(http.StatusOK, contentLength, contentType, reader, extraHeaders) + }) router.Run(":8080") } ``` @@ -607,6 +1348,8 @@ templates/users/index.tmpl {{ end }} ``` +#### Custom Template renderer + You can also use your own html template render ```go @@ -620,41 +1363,54 @@ func main() { } ``` +#### Custom Delimiters + You may use custom delims ```go r := gin.Default() r.Delims("{[{", "}]}") - r.LoadHTMLGlob("/path/to/templates")) -``` + r.LoadHTMLGlob("/path/to/templates") +``` -#### Add custom template funcs +#### Custom Template Funcs + +See the detail [example code](https://github.com/gin-gonic/examples/tree/master/template). main.go ```go - ... - - func formatAsDate(t time.Time) string { - year, month, day := t.Date() - return fmt.Sprintf("%d/%02d/%02d", year, month, day) - } - - ... - - router.SetFuncMap(template.FuncMap{ - "formatAsDate": formatAsDate, - }) - - ... - - router.GET("/raw", func(c *Context) { - c.HTML(http.StatusOK, "raw.tmpl", map[string]interface{}{ - "now": time.Date(2017, 07, 01, 0, 0, 0, 0, time.UTC), - }) - }) - - ... 
+import ( + "fmt" + "html/template" + "net/http" + "time" + + "github.com/gin-gonic/gin" +) + +func formatAsDate(t time.Time) string { + year, month, day := t.Date() + return fmt.Sprintf("%d%02d/%02d", year, month, day) +} + +func main() { + router := gin.Default() + router.Delims("{[{", "}]}") + router.SetFuncMap(template.FuncMap{ + "formatAsDate": formatAsDate, + }) + router.LoadHTMLFiles("./testdata/template/raw.tmpl") + + router.GET("/raw", func(c *gin.Context) { + c.HTML(http.StatusOK, "raw.tmpl", gin.H{ + "now": time.Date(2017, 07, 01, 0, 0, 0, 0, time.UTC), + }) + }) + + router.Run(":8080") +} + ``` raw.tmpl @@ -674,14 +1430,32 @@ Gin allow by default use only one html.Template. Check [a multitemplate render]( ### Redirects -Issuing a HTTP redirect is easy: +Issuing a HTTP redirect is easy. Both internal and external locations are supported. ```go r.GET("/test", func(c *gin.Context) { c.Redirect(http.StatusMovedPermanently, "http://www.google.com/") }) ``` -Both internal and external locations are supported. + +Issuing a HTTP redirect from POST. Refer to issue: [#444](https://github.com/gin-gonic/gin/issues/444) +```go +r.POST("/test", func(c *gin.Context) { + c.Redirect(http.StatusFound, "/foo") +}) +``` + +Issuing a Router redirect, use `HandleContext` like below. + +``` go +r.GET("/test", func(c *gin.Context) { + c.Request.URL.Path = "/test2" + r.HandleContext(c) +}) +r.GET("/test2", func(c *gin.Context) { + c.JSON(200, gin.H{"hello": "world"}) +}) +``` ### Custom Middleware @@ -765,7 +1539,7 @@ func main() { ### Goroutines inside a middleware -When starting inside a middleware or handler, you **SHOULD NOT** use the original context inside it, you have to use a read-only copy. +When starting new Goroutines inside a middleware or handler, you **SHOULD NOT** use the original context inside it, you have to use a read-only copy. ```go func main() { @@ -827,7 +1601,6 @@ func main() { example for 1-line LetsEncrypt HTTPS servers. -[embedmd]:# (examples/auto-tls/example1.go go) ```go package main @@ -852,7 +1625,6 @@ func main() { example for custom autocert manager. 
-[embedmd]:# (examples/auto-tls/example2.go go) ```go package main @@ -878,16 +1650,106 @@ func main() { Cache: autocert.DirCache("/var/www/.cache"), } - log.Fatal(autotls.RunWithManager(r, m)) + log.Fatal(autotls.RunWithManager(r, &m)) +} +``` + +### Run multiple service using Gin + +See the [question](https://github.com/gin-gonic/gin/issues/346) and try the following example: + +```go +package main + +import ( + "log" + "net/http" + "time" + + "github.com/gin-gonic/gin" + "golang.org/x/sync/errgroup" +) + +var ( + g errgroup.Group +) + +func router01() http.Handler { + e := gin.New() + e.Use(gin.Recovery()) + e.GET("/", func(c *gin.Context) { + c.JSON( + http.StatusOK, + gin.H{ + "code": http.StatusOK, + "error": "Welcome server 01", + }, + ) + }) + + return e +} + +func router02() http.Handler { + e := gin.New() + e.Use(gin.Recovery()) + e.GET("/", func(c *gin.Context) { + c.JSON( + http.StatusOK, + gin.H{ + "code": http.StatusOK, + "error": "Welcome server 02", + }, + ) + }) + + return e +} + +func main() { + server01 := &http.Server{ + Addr: ":8080", + Handler: router01(), + ReadTimeout: 5 * time.Second, + WriteTimeout: 10 * time.Second, + } + + server02 := &http.Server{ + Addr: ":8081", + Handler: router02(), + ReadTimeout: 5 * time.Second, + WriteTimeout: 10 * time.Second, + } + + g.Go(func() error { + err := server01.ListenAndServe() + if err != nil && err != http.ErrServerClosed { + log.Fatal(err) + } + return err + }) + + g.Go(func() error { + err := server02.ListenAndServe() + if err != nil && err != http.ErrServerClosed { + log.Fatal(err) + } + return err + }) + + if err := g.Wait(); err != nil { + log.Fatal(err) + } } ``` -### Graceful restart or stop +### Graceful shutdown or restart + +There are a few approaches you can use to perform a graceful shutdown or restart. You can make use of third-party packages specifically built for that, or you can manually do the same with the functions and methods from the built-in packages. -Do you want to graceful restart or stop your web server? -There are some ways this can be done. +#### Third-party packages -We can use [fvbock/endless](https://github.com/fvbock/endless) to replace the default `ListenAndServe`. Refer issue [#296](https://github.com/gin-gonic/gin/issues/296) for more details. +We can use [fvbock/endless](https://github.com/fvbock/endless) to replace the default `ListenAndServe`. Refer to issue [#296](https://github.com/gin-gonic/gin/issues/296) for more details. ```go router := gin.Default() @@ -896,15 +1758,16 @@ router.GET("/", handler) endless.ListenAndServe(":4242", router) ``` -An alternative to endless: +Alternatives: * [manners](https://github.com/braintree/manners): A polite Go HTTP server that shuts down gracefully. * [graceful](https://github.com/tylerb/graceful): Graceful is a Go package enabling graceful shutdown of an http.Handler server. * [grace](https://github.com/facebookgo/grace): Graceful restart & zero downtime deploy for Go servers. -If you are using Go 1.8, you may not need to use this library! Consider using http.Server's built-in [Shutdown()](https://golang.org/pkg/net/http/#Server.Shutdown) method for graceful shutdowns. See the full [graceful-shutdown](./examples/graceful-shutdown) example with gin. +#### Manually + +In case you are using Go 1.8 or a later version, you may not need to use those libraries. Consider using `http.Server`'s built-in [Shutdown()](https://golang.org/pkg/net/http/#Server.Shutdown) method for graceful shutdowns. 
The example below describes its usage, and we've got more examples using gin [here](https://github.com/gin-gonic/examples/tree/master/graceful-shutdown). -[embedmd]:# (examples/graceful-shutdown/graceful-shutdown/server.go go) ```go // +build go1.8 @@ -916,6 +1779,7 @@ import ( "net/http" "os" "os/signal" + "syscall" "time" "github.com/gin-gonic/gin" @@ -933,9 +1797,10 @@ func main() { Handler: router, } + // Initializing the server in a goroutine so that + // it won't block the graceful shutdown handling below go func() { - // service connections - if err := srv.ListenAndServe(); err != nil { + if err := srv.ListenAndServe(); err != nil && errors.Is(err, http.ErrServerClosed) { log.Printf("listen: %s\n", err) } }() @@ -943,35 +1808,410 @@ func main() { // Wait for interrupt signal to gracefully shutdown the server with // a timeout of 5 seconds. quit := make(chan os.Signal) - signal.Notify(quit, os.Interrupt) + // kill (no param) default send syscall.SIGTERM + // kill -2 is syscall.SIGINT + // kill -9 is syscall.SIGKILL but can't be catch, so don't need add it + signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM) <-quit - log.Println("Shutdown Server ...") + log.Println("Shutting down server...") + // The context is used to inform the server it has 5 seconds to finish + // the request it is currently handling ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() + if err := srv.Shutdown(ctx); err != nil { - log.Fatal("Server Shutdown:", err) + log.Fatal("Server forced to shutdown:", err) + } + + log.Println("Server exiting") +} +``` + +### Build a single binary with templates + +You can build a server into a single binary containing templates by using [go-assets][]. + +[go-assets]: https://github.com/jessevdk/go-assets + +```go +func main() { + r := gin.New() + + t, err := loadTemplate() + if err != nil { + panic(err) + } + r.SetHTMLTemplate(t) + + r.GET("/", func(c *gin.Context) { + c.HTML(http.StatusOK, "/html/index.tmpl",nil) + }) + r.Run(":8080") +} + +// loadTemplate loads templates embedded by go-assets-builder +func loadTemplate() (*template.Template, error) { + t := template.New("") + for name, file := range Assets.Files { + defer file.Close() + if file.IsDir() || !strings.HasSuffix(name, ".tmpl") { + continue + } + h, err := ioutil.ReadAll(file) + if err != nil { + return nil, err + } + t, err = t.New(name).Parse(string(h)) + if err != nil { + return nil, err + } + } + return t, nil +} +``` + +See a complete example in the `https://github.com/gin-gonic/examples/tree/master/assets-in-binary` directory. 
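+
+The `Assets.Files` map iterated by `loadTemplate` is generated ahead of time with the [go-assets-builder](https://github.com/jessevdk/go-assets-builder) tool. A minimal sketch of that generation step, assuming the templates live under `./html` (the exact command-line flags may differ for your setup):
+
+```sh
+# Install the builder, then embed every file under ./html into assets.go,
+# which defines the Assets variable used by loadTemplate above.
+go get github.com/jessevdk/go-assets-builder
+go-assets-builder html -o assets.go
+```
+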
+ +### Bind form-data request with custom struct + +The follow example using custom struct: + +```go +type StructA struct { + FieldA string `form:"field_a"` +} + +type StructB struct { + NestedStruct StructA + FieldB string `form:"field_b"` +} + +type StructC struct { + NestedStructPointer *StructA + FieldC string `form:"field_c"` +} + +type StructD struct { + NestedAnonyStruct struct { + FieldX string `form:"field_x"` + } + FieldD string `form:"field_d"` +} + +func GetDataB(c *gin.Context) { + var b StructB + c.Bind(&b) + c.JSON(200, gin.H{ + "a": b.NestedStruct, + "b": b.FieldB, + }) +} + +func GetDataC(c *gin.Context) { + var b StructC + c.Bind(&b) + c.JSON(200, gin.H{ + "a": b.NestedStructPointer, + "c": b.FieldC, + }) +} + +func GetDataD(c *gin.Context) { + var b StructD + c.Bind(&b) + c.JSON(200, gin.H{ + "x": b.NestedAnonyStruct, + "d": b.FieldD, + }) +} + +func main() { + r := gin.Default() + r.GET("/getb", GetDataB) + r.GET("/getc", GetDataC) + r.GET("/getd", GetDataD) + + r.Run() +} +``` + +Using the command `curl` command result: + +``` +$ curl "http://localhost:8080/getb?field_a=hello&field_b=world" +{"a":{"FieldA":"hello"},"b":"world"} +$ curl "http://localhost:8080/getc?field_a=hello&field_c=world" +{"a":{"FieldA":"hello"},"c":"world"} +$ curl "http://localhost:8080/getd?field_x=hello&field_d=world" +{"d":"world","x":{"FieldX":"hello"}} +``` + +### Try to bind body into different structs + +The normal methods for binding request body consumes `c.Request.Body` and they +cannot be called multiple times. + +```go +type formA struct { + Foo string `json:"foo" xml:"foo" binding:"required"` +} + +type formB struct { + Bar string `json:"bar" xml:"bar" binding:"required"` +} + +func SomeHandler(c *gin.Context) { + objA := formA{} + objB := formB{} + // This c.ShouldBind consumes c.Request.Body and it cannot be reused. + if errA := c.ShouldBind(&objA); errA == nil { + c.String(http.StatusOK, `the body should be formA`) + // Always an error is occurred by this because c.Request.Body is EOF now. + } else if errB := c.ShouldBind(&objB); errB == nil { + c.String(http.StatusOK, `the body should be formB`) + } else { + ... + } +} +``` + +For this, you can use `c.ShouldBindBodyWith`. + +```go +func SomeHandler(c *gin.Context) { + objA := formA{} + objB := formB{} + // This reads c.Request.Body and stores the result into the context. + if errA := c.ShouldBindBodyWith(&objA, binding.JSON); errA == nil { + c.String(http.StatusOK, `the body should be formA`) + // At this time, it reuses body stored in the context. + } else if errB := c.ShouldBindBodyWith(&objB, binding.JSON); errB == nil { + c.String(http.StatusOK, `the body should be formB JSON`) + // And it can accepts other formats + } else if errB2 := c.ShouldBindBodyWith(&objB, binding.XML); errB2 == nil { + c.String(http.StatusOK, `the body should be formB XML`) + } else { + ... + } +} +``` + +* `c.ShouldBindBodyWith` stores body into the context before binding. This has +a slight impact to performance, so you should not use this method if you are +enough to call binding at once. +* This feature is only needed for some formats -- `JSON`, `XML`, `MsgPack`, +`ProtoBuf`. For other formats, `Query`, `Form`, `FormPost`, `FormMultipart`, +can be called by `c.ShouldBind()` multiple times without any damage to +performance (See [#1341](https://github.com/gin-gonic/gin/pull/1341)). + +### http2 server push + +http.Pusher is supported only **go1.8+**. See the [golang blog](https://blog.golang.org/h2push) for detail information. 
+ +```go +package main + +import ( + "html/template" + "log" + + "github.com/gin-gonic/gin" +) + +var html = template.Must(template.New("https").Parse(`
+<html>
+<head>
+  <title>Https Test</title>
+  <script src="/assets/app.js"></script>
+</head>
+<body>
+  <h1 style="color:red;">Welcome, Ginner!</h1>
+</body>
+</html>
+ + +`)) + +func main() { + r := gin.Default() + r.Static("/assets", "./assets") + r.SetHTMLTemplate(html) + + r.GET("/", func(c *gin.Context) { + if pusher := c.Writer.Pusher(); pusher != nil { + // use pusher.Push() to do server push + if err := pusher.Push("/assets/app.js", nil); err != nil { + log.Printf("Failed to push: %v", err) + } + } + c.HTML(200, "https", gin.H{ + "status": "success", + }) + }) + + // Listen and Server in https://127.0.0.1:8080 + r.RunTLS(":8080", "./testdata/server.pem", "./testdata/server.key") +} +``` + +### Define format for the log of routes + +The default log of routes is: +``` +[GIN-debug] POST /foo --> main.main.func1 (3 handlers) +[GIN-debug] GET /bar --> main.main.func2 (3 handlers) +[GIN-debug] GET /status --> main.main.func3 (3 handlers) +``` + +If you want to log this information in given format (e.g. JSON, key values or something else), then you can define this format with `gin.DebugPrintRouteFunc`. +In the example below, we log all routes with standard log package but you can use another log tools that suits of your needs. +```go +import ( + "log" + "net/http" + + "github.com/gin-gonic/gin" +) + +func main() { + r := gin.Default() + gin.DebugPrintRouteFunc = func(httpMethod, absolutePath, handlerName string, nuHandlers int) { + log.Printf("endpoint %v %v %v %v\n", httpMethod, absolutePath, handlerName, nuHandlers) } - log.Println("Server exist") + + r.POST("/foo", func(c *gin.Context) { + c.JSON(http.StatusOK, "foo") + }) + + r.GET("/bar", func(c *gin.Context) { + c.JSON(http.StatusOK, "bar") + }) + + r.GET("/status", func(c *gin.Context) { + c.JSON(http.StatusOK, "ok") + }) + + // Listen and Server in http://0.0.0.0:8080 + r.Run() +} +``` + +### Set and get a cookie + +```go +import ( + "fmt" + + "github.com/gin-gonic/gin" +) + +func main() { + + router := gin.Default() + + router.GET("/cookie", func(c *gin.Context) { + + cookie, err := c.Cookie("gin_cookie") + + if err != nil { + cookie = "NotSet" + c.SetCookie("gin_cookie", "test", 3600, "/", "localhost", false, true) + } + + fmt.Printf("Cookie value: %s \n", cookie) + }) + + router.Run() +} +``` + +## Don't trust all proxies + +Gin lets you specify which headers to hold the real client IP (if any), +as well as specifying which proxies (or direct clients) you trust to +specify one of these headers. + +The `TrustedProxies` slice on your `gin.Engine` specifes network addresses or +network CIDRs from where clients which their request headers related to client +IP can be trusted. They can be IPv4 addresses, IPv4 CIDRs, IPv6 addresses or +IPv6 CIDRs. + +```go +import ( + "fmt" + + "github.com/gin-gonic/gin" +) + +func main() { + + router := gin.Default() + router.TrustedProxies = []string{"192.168.1.2"} + + router.GET("/", func(c *gin.Context) { + // If the client is 192.168.1.2, use the X-Forwarded-For + // header to deduce the original client IP from the trust- + // worthy parts of that header. + // Otherwise, simply return the direct client IP + fmt.Printf("ClientIP: %s\n", c.ClientIP()) + }) + router.Run() +} +``` + +## Testing + +The `net/http/httptest` package is preferable way for HTTP testing. 
+ +```go +package main + +func setupRouter() *gin.Engine { + r := gin.Default() + r.GET("/ping", func(c *gin.Context) { + c.String(200, "pong") + }) + return r +} + +func main() { + r := setupRouter() + r.Run(":8080") } ``` -## Contributing +Test for code example above: + +```go +package main + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" +) -- With issues: - - Use the search tool before opening a new issue. - - Please provide source code and commit sha if you found a bug. - - Review existing issues and provide feedback or react to them. -- With pull requests: - - Open your pull request against develop - - Your pull request should have no more than two commits, if not you should squash them. - - It should pass all tests in the available continuous integrations systems such as TravisCI. - - You should add/modify tests to cover your proposed code changes. - - If your pull request contains a new feature, please document it on the README. +func TestPingRoute(t *testing.T) { + router := setupRouter() + + w := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "/ping", nil) + router.ServeHTTP(w, req) + + assert.Equal(t, 200, w.Code) + assert.Equal(t, "pong", w.Body.String()) +} +``` ## Users Awesome project lists using [Gin](https://github.com/gin-gonic/gin) web framework. -* [drone](https://github.com/drone/drone): Drone is a Continuous Delivery platform built on Docker, written in Go * [gorush](https://github.com/appleboy/gorush): A push notification server written in Go. +* [fnproject](https://github.com/fnproject/fn): The container native, cloud agnostic serverless platform. +* [photoprism](https://github.com/photoprism/photoprism): Personal photo management powered by Go and Google TensorFlow. +* [krakend](https://github.com/devopsfaith/krakend): Ultra performant API Gateway with middlewares. +* [picfit](https://github.com/thoas/picfit): An image resizing server written in Go. +* [brigade](https://github.com/brigadecore/brigade): Event-based Scripting for Kubernetes. +* [dkron](https://github.com/distribworks/dkron): Distributed, fault tolerant job scheduling system. diff --git a/vendor/github.com/gin-gonic/gin/auth.go b/vendor/github.com/gin-gonic/gin/auth.go index 125e659f2..4d8a6ce48 100644 --- a/vendor/github.com/gin-gonic/gin/auth.go +++ b/vendor/github.com/gin-gonic/gin/auth.go @@ -7,27 +7,32 @@ package gin import ( "crypto/subtle" "encoding/base64" + "net/http" "strconv" + + "github.com/gin-gonic/gin/internal/bytesconv" ) +// AuthUserKey is the cookie name for user credential in basic auth. const AuthUserKey = "user" -type ( - Accounts map[string]string - authPair struct { - Value string - User string - } - authPairs []authPair -) +// Accounts defines a key/value for user/pass list of authorized logins. 
+type Accounts map[string]string + +type authPair struct { + value string + user string +} + +type authPairs []authPair func (a authPairs) searchCredential(authValue string) (string, bool) { - if len(authValue) == 0 { + if authValue == "" { return "", false } for _, pair := range a { - if pair.Value == authValue { - return pair.User, true + if subtle.ConstantTimeCompare([]byte(pair.value), []byte(authValue)) == 1 { + return pair.user, true } } return "", false @@ -45,16 +50,17 @@ func BasicAuthForRealm(accounts Accounts, realm string) HandlerFunc { pairs := processAccounts(accounts) return func(c *Context) { // Search user in the slice of allowed credentials - user, found := pairs.searchCredential(c.Request.Header.Get("Authorization")) + user, found := pairs.searchCredential(c.requestHeader("Authorization")) if !found { // Credentials doesn't match, we return 401 and abort handlers chain. c.Header("WWW-Authenticate", realm) - c.AbortWithStatus(401) - } else { - // The user credentials was found, set user's id to key AuthUserKey in this context, the userId can be read later using - // c.MustGet(gin.AuthUserKey) - c.Set(AuthUserKey, user) + c.AbortWithStatus(http.StatusUnauthorized) + return } + + // The user credentials was found, set user's id to key AuthUserKey in this context, the user's id can be read later using + // c.MustGet(gin.AuthUserKey). + c.Set(AuthUserKey, user) } } @@ -65,14 +71,15 @@ func BasicAuth(accounts Accounts) HandlerFunc { } func processAccounts(accounts Accounts) authPairs { - assert1(len(accounts) > 0, "Empty list of authorized credentials") - pairs := make(authPairs, 0, len(accounts)) + length := len(accounts) + assert1(length > 0, "Empty list of authorized credentials") + pairs := make(authPairs, 0, length) for user, password := range accounts { - assert1(len(user) > 0, "User can not be empty") + assert1(user != "", "User can not be empty") value := authorizationHeader(user, password) pairs = append(pairs, authPair{ - Value: value, - User: user, + value: value, + user: user, }) } return pairs @@ -80,13 +87,5 @@ func processAccounts(accounts Accounts) authPairs { func authorizationHeader(user, password string) string { base := user + ":" + password - return "Basic " + base64.StdEncoding.EncodeToString([]byte(base)) -} - -func secureCompare(given, actual string) bool { - if subtle.ConstantTimeEq(int32(len(given)), int32(len(actual))) == 1 { - return subtle.ConstantTimeCompare([]byte(given), []byte(actual)) == 1 - } - /* Securely compare actual to itself to keep constant time, but always return false */ - return subtle.ConstantTimeCompare([]byte(actual), []byte(actual)) == 1 && false + return "Basic " + base64.StdEncoding.EncodeToString(bytesconv.StringToBytes(base)) } diff --git a/vendor/github.com/gin-gonic/gin/binding/binding.go b/vendor/github.com/gin-gonic/gin/binding/binding.go index 1dbf2460a..5caeb581a 100644 --- a/vendor/github.com/gin-gonic/gin/binding/binding.go +++ b/vendor/github.com/gin-gonic/gin/binding/binding.go @@ -2,10 +2,14 @@ // Use of this source code is governed by a MIT style // license that can be found in the LICENSE file. +//go:build !nomsgpack +// +build !nomsgpack + package binding import "net/http" +// Content-Type MIME of the most common data formats. 
const ( MIMEJSON = "application/json" MIMEHTML = "text/html" @@ -17,36 +21,74 @@ const ( MIMEPROTOBUF = "application/x-protobuf" MIMEMSGPACK = "application/x-msgpack" MIMEMSGPACK2 = "application/msgpack" + MIMEYAML = "application/x-yaml" ) +// Binding describes the interface which needs to be implemented for binding the +// data present in the request such as JSON request body, query parameters or +// the form POST. type Binding interface { Name() string Bind(*http.Request, interface{}) error } +// BindingBody adds BindBody method to Binding. BindBody is similar with Bind, +// but it reads the body from supplied bytes instead of req.Body. +type BindingBody interface { + Binding + BindBody([]byte, interface{}) error +} + +// BindingUri adds BindUri method to Binding. BindUri is similar with Bind, +// but it read the Params. +type BindingUri interface { + Name() string + BindUri(map[string][]string, interface{}) error +} + +// StructValidator is the minimal interface which needs to be implemented in +// order for it to be used as the validator engine for ensuring the correctness +// of the request. Gin provides a default implementation for this using +// https://github.com/go-playground/validator/tree/v8.18.2. type StructValidator interface { // ValidateStruct can receive any kind of type and it should never panic, even if the configuration is not right. - // If the received type is not a struct, any validation should be skipped and nil must be returned. + // If the received type is a slice|array, the validation should be performed travel on every element. + // If the received type is not a struct or slice|array, any validation should be skipped and nil must be returned. // If the received type is a struct or pointer to a struct, the validation should be performed. // If the struct is not valid or the validation itself fails, a descriptive error should be returned. // Otherwise nil must be returned. ValidateStruct(interface{}) error + + // Engine returns the underlying validator engine which powers the + // StructValidator implementation. + Engine() interface{} } +// Validator is the default validator which implements the StructValidator +// interface. It uses https://github.com/go-playground/validator/tree/v8.18.2 +// under the hood. var Validator StructValidator = &defaultValidator{} +// These implement the Binding interface and can be used to bind the data +// present in the request to struct instances. var ( JSON = jsonBinding{} XML = xmlBinding{} Form = formBinding{} + Query = queryBinding{} FormPost = formPostBinding{} FormMultipart = formMultipartBinding{} ProtoBuf = protobufBinding{} MsgPack = msgpackBinding{} + YAML = yamlBinding{} + Uri = uriBinding{} + Header = headerBinding{} ) +// Default returns the appropriate Binding instance based on the HTTP method +// and the content type. 
func Default(method, contentType string) Binding { - if method == "GET" { + if method == http.MethodGet { return Form } @@ -59,7 +101,11 @@ func Default(method, contentType string) Binding { return ProtoBuf case MIMEMSGPACK, MIMEMSGPACK2: return MsgPack - default: //case MIMEPOSTForm, MIMEMultipartPOSTForm: + case MIMEYAML: + return YAML + case MIMEMultipartPOSTForm: + return FormMultipart + default: // case MIMEPOSTForm: return Form } } diff --git a/vendor/github.com/gin-gonic/gin/binding/binding_nomsgpack.go b/vendor/github.com/gin-gonic/gin/binding/binding_nomsgpack.go new file mode 100644 index 000000000..9afa3dcf6 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/binding/binding_nomsgpack.go @@ -0,0 +1,112 @@ +// Copyright 2020 Gin Core Team. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +//go:build nomsgpack +// +build nomsgpack + +package binding + +import "net/http" + +// Content-Type MIME of the most common data formats. +const ( + MIMEJSON = "application/json" + MIMEHTML = "text/html" + MIMEXML = "application/xml" + MIMEXML2 = "text/xml" + MIMEPlain = "text/plain" + MIMEPOSTForm = "application/x-www-form-urlencoded" + MIMEMultipartPOSTForm = "multipart/form-data" + MIMEPROTOBUF = "application/x-protobuf" + MIMEYAML = "application/x-yaml" +) + +// Binding describes the interface which needs to be implemented for binding the +// data present in the request such as JSON request body, query parameters or +// the form POST. +type Binding interface { + Name() string + Bind(*http.Request, interface{}) error +} + +// BindingBody adds BindBody method to Binding. BindBody is similar with Bind, +// but it reads the body from supplied bytes instead of req.Body. +type BindingBody interface { + Binding + BindBody([]byte, interface{}) error +} + +// BindingUri adds BindUri method to Binding. BindUri is similar with Bind, +// but it read the Params. +type BindingUri interface { + Name() string + BindUri(map[string][]string, interface{}) error +} + +// StructValidator is the minimal interface which needs to be implemented in +// order for it to be used as the validator engine for ensuring the correctness +// of the request. Gin provides a default implementation for this using +// https://github.com/go-playground/validator/tree/v8.18.2. +type StructValidator interface { + // ValidateStruct can receive any kind of type and it should never panic, even if the configuration is not right. + // If the received type is not a struct, any validation should be skipped and nil must be returned. + // If the received type is a struct or pointer to a struct, the validation should be performed. + // If the struct is not valid or the validation itself fails, a descriptive error should be returned. + // Otherwise nil must be returned. + ValidateStruct(interface{}) error + + // Engine returns the underlying validator engine which powers the + // StructValidator implementation. + Engine() interface{} +} + +// Validator is the default validator which implements the StructValidator +// interface. It uses https://github.com/go-playground/validator/tree/v8.18.2 +// under the hood. +var Validator StructValidator = &defaultValidator{} + +// These implement the Binding interface and can be used to bind the data +// present in the request to struct instances. 
+var ( + JSON = jsonBinding{} + XML = xmlBinding{} + Form = formBinding{} + Query = queryBinding{} + FormPost = formPostBinding{} + FormMultipart = formMultipartBinding{} + ProtoBuf = protobufBinding{} + YAML = yamlBinding{} + Uri = uriBinding{} + Header = headerBinding{} +) + +// Default returns the appropriate Binding instance based on the HTTP method +// and the content type. +func Default(method, contentType string) Binding { + if method == "GET" { + return Form + } + + switch contentType { + case MIMEJSON: + return JSON + case MIMEXML, MIMEXML2: + return XML + case MIMEPROTOBUF: + return ProtoBuf + case MIMEYAML: + return YAML + case MIMEMultipartPOSTForm: + return FormMultipart + default: // case MIMEPOSTForm: + return Form + } +} + +func validate(obj interface{}) error { + if Validator == nil { + return nil + } + return Validator.ValidateStruct(obj) +} diff --git a/vendor/github.com/gin-gonic/gin/binding/default_validator.go b/vendor/github.com/gin-gonic/gin/binding/default_validator.go index 19885f164..c57a120fc 100644 --- a/vendor/github.com/gin-gonic/gin/binding/default_validator.go +++ b/vendor/github.com/gin-gonic/gin/binding/default_validator.go @@ -5,10 +5,12 @@ package binding import ( + "fmt" "reflect" + "strings" "sync" - "gopkg.in/go-playground/validator.v8" + "github.com/go-playground/validator/v10" ) type defaultValidator struct { @@ -16,30 +18,68 @@ type defaultValidator struct { validate *validator.Validate } +type sliceValidateError []error + +func (err sliceValidateError) Error() string { + var errMsgs []string + for i, e := range err { + if e == nil { + continue + } + errMsgs = append(errMsgs, fmt.Sprintf("[%d]: %s", i, e.Error())) + } + return strings.Join(errMsgs, "\n") +} + var _ StructValidator = &defaultValidator{} +// ValidateStruct receives any kind of type, but only performed struct or pointer to struct type. func (v *defaultValidator) ValidateStruct(obj interface{}) error { - if kindOfData(obj) == reflect.Struct { - v.lazyinit() - if err := v.validate.Struct(obj); err != nil { - return error(err) + if obj == nil { + return nil + } + + value := reflect.ValueOf(obj) + switch value.Kind() { + case reflect.Ptr: + return v.ValidateStruct(value.Elem().Interface()) + case reflect.Struct: + return v.validateStruct(obj) + case reflect.Slice, reflect.Array: + count := value.Len() + validateRet := make(sliceValidateError, 0) + for i := 0; i < count; i++ { + if err := v.ValidateStruct(value.Index(i).Interface()); err != nil { + validateRet = append(validateRet, err) + } + } + if len(validateRet) == 0 { + return nil } + return validateRet + default: + return nil } - return nil +} + +// validateStruct receives struct type +func (v *defaultValidator) validateStruct(obj interface{}) error { + v.lazyinit() + return v.validate.Struct(obj) +} + +// Engine returns the underlying validator engine which powers the default +// Validator instance. This is useful if you want to register custom validations +// or struct level validations. 
See validator GoDoc for more info - +// https://godoc.org/gopkg.in/go-playground/validator.v8 +func (v *defaultValidator) Engine() interface{} { + v.lazyinit() + return v.validate } func (v *defaultValidator) lazyinit() { v.once.Do(func() { - config := &validator.Config{TagName: "binding"} - v.validate = validator.New(config) + v.validate = validator.New() + v.validate.SetTagName("binding") }) } - -func kindOfData(data interface{}) reflect.Kind { - value := reflect.ValueOf(data) - valueType := value.Kind() - if valueType == reflect.Ptr { - valueType = value.Elem().Kind() - } - return valueType -} diff --git a/vendor/github.com/gin-gonic/gin/binding/form.go b/vendor/github.com/gin-gonic/gin/binding/form.go index 557333e6f..b93c34cf4 100644 --- a/vendor/github.com/gin-gonic/gin/binding/form.go +++ b/vendor/github.com/gin-gonic/gin/binding/form.go @@ -4,7 +4,11 @@ package binding -import "net/http" +import ( + "net/http" +) + +const defaultMemory = 32 << 20 type formBinding struct{} type formPostBinding struct{} @@ -18,7 +22,11 @@ func (formBinding) Bind(req *http.Request, obj interface{}) error { if err := req.ParseForm(); err != nil { return err } - req.ParseMultipartForm(32 << 10) // 32 MB + if err := req.ParseMultipartForm(defaultMemory); err != nil { + if err != http.ErrNotMultipart { + return err + } + } if err := mapForm(obj, req.Form); err != nil { return err } @@ -44,11 +52,12 @@ func (formMultipartBinding) Name() string { } func (formMultipartBinding) Bind(req *http.Request, obj interface{}) error { - if err := req.ParseMultipartForm(32 << 10); err != nil { + if err := req.ParseMultipartForm(defaultMemory); err != nil { return err } - if err := mapForm(obj, req.MultipartForm.Value); err != nil { + if err := mappingByPtr(obj, (*multipartRequest)(req), "form"); err != nil { return err } + return validate(obj) } diff --git a/vendor/github.com/gin-gonic/gin/binding/form_mapping.go b/vendor/github.com/gin-gonic/gin/binding/form_mapping.go index 34f126781..2f4e45b40 100644 --- a/vendor/github.com/gin-gonic/gin/binding/form_mapping.go +++ b/vendor/github.com/gin-gonic/gin/binding/form_mapping.go @@ -6,99 +6,229 @@ package binding import ( "errors" + "fmt" "reflect" "strconv" + "strings" "time" + + "github.com/gin-gonic/gin/internal/bytesconv" + "github.com/gin-gonic/gin/internal/json" ) +var errUnknownType = errors.New("unknown type") + +func mapUri(ptr interface{}, m map[string][]string) error { + return mapFormByTag(ptr, m, "uri") +} + func mapForm(ptr interface{}, form map[string][]string) error { - typ := reflect.TypeOf(ptr).Elem() - val := reflect.ValueOf(ptr).Elem() - for i := 0; i < typ.NumField(); i++ { - typeField := typ.Field(i) - structField := val.Field(i) - if !structField.CanSet() { - continue - } - - structFieldKind := structField.Kind() - inputFieldName := typeField.Tag.Get("form") - if inputFieldName == "" { - inputFieldName = typeField.Name - - // if "form" tag is nil, we inspect if the field is a struct. 
- // this would not make sense for JSON parsing but it does for a form - // since data is flatten - if structFieldKind == reflect.Struct { - err := mapForm(structField.Addr().Interface(), form) - if err != nil { - return err - } - continue - } + return mapFormByTag(ptr, form, "form") +} + +var emptyField = reflect.StructField{} + +func mapFormByTag(ptr interface{}, form map[string][]string, tag string) error { + // Check if ptr is a map + ptrVal := reflect.ValueOf(ptr) + var pointed interface{} + if ptrVal.Kind() == reflect.Ptr { + ptrVal = ptrVal.Elem() + pointed = ptrVal.Interface() + } + if ptrVal.Kind() == reflect.Map && + ptrVal.Type().Key().Kind() == reflect.String { + if pointed != nil { + ptr = pointed + } + return setFormMap(ptr, form) + } + + return mappingByPtr(ptr, formSource(form), tag) +} + +// setter tries to set value on a walking by fields of a struct +type setter interface { + TrySet(value reflect.Value, field reflect.StructField, key string, opt setOptions) (isSetted bool, err error) +} + +type formSource map[string][]string + +var _ setter = formSource(nil) + +// TrySet tries to set a value by request's form source (like map[string][]string) +func (form formSource) TrySet(value reflect.Value, field reflect.StructField, tagValue string, opt setOptions) (isSetted bool, err error) { + return setByForm(value, field, form, tagValue, opt) +} + +func mappingByPtr(ptr interface{}, setter setter, tag string) error { + _, err := mapping(reflect.ValueOf(ptr), emptyField, setter, tag) + return err +} + +func mapping(value reflect.Value, field reflect.StructField, setter setter, tag string) (bool, error) { + if field.Tag.Get(tag) == "-" { // just ignoring this field + return false, nil + } + + var vKind = value.Kind() + + if vKind == reflect.Ptr { + var isNew bool + vPtr := value + if value.IsNil() { + isNew = true + vPtr = reflect.New(value.Type().Elem()) } - inputValue, exists := form[inputFieldName] - if !exists { - continue + isSetted, err := mapping(vPtr.Elem(), field, setter, tag) + if err != nil { + return false, err } + if isNew && isSetted { + value.Set(vPtr) + } + return isSetted, nil + } - numElems := len(inputValue) - if structFieldKind == reflect.Slice && numElems > 0 { - sliceOf := structField.Type().Elem().Kind() - slice := reflect.MakeSlice(structField.Type(), numElems, numElems) - for i := 0; i < numElems; i++ { - if err := setWithProperType(sliceOf, inputValue[i], slice.Index(i)); err != nil { - return err - } - } - val.Field(i).Set(slice) - } else { - if _, isTime := structField.Interface().(time.Time); isTime { - if err := setTimeField(inputValue[0], typeField, structField); err != nil { - return err - } + if vKind != reflect.Struct || !field.Anonymous { + ok, err := tryToSetValue(value, field, setter, tag) + if err != nil { + return false, err + } + if ok { + return true, nil + } + } + + if vKind == reflect.Struct { + tValue := value.Type() + + var isSetted bool + for i := 0; i < value.NumField(); i++ { + sf := tValue.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported continue } - if err := setWithProperType(typeField.Type.Kind(), inputValue[0], structField); err != nil { - return err + ok, err := mapping(value.Field(i), tValue.Field(i), setter, tag) + if err != nil { + return false, err } + isSetted = isSetted || ok } + return isSetted, nil + } + return false, nil +} + +type setOptions struct { + isDefaultExists bool + defaultValue string +} + +func tryToSetValue(value reflect.Value, field reflect.StructField, setter setter, tag string) (bool, 
error) { + var tagValue string + var setOpt setOptions + + tagValue = field.Tag.Get(tag) + tagValue, opts := head(tagValue, ",") + + if tagValue == "" { // default value is FieldName + tagValue = field.Name + } + if tagValue == "" { // when field is "emptyField" variable + return false, nil + } + + var opt string + for len(opts) > 0 { + opt, opts = head(opts, ",") + + if k, v := head(opt, "="); k == "default" { + setOpt.isDefaultExists = true + setOpt.defaultValue = v + } + } + + return setter.TrySet(value, field, tagValue, setOpt) +} + +func setByForm(value reflect.Value, field reflect.StructField, form map[string][]string, tagValue string, opt setOptions) (isSetted bool, err error) { + vs, ok := form[tagValue] + if !ok && !opt.isDefaultExists { + return false, nil + } + + switch value.Kind() { + case reflect.Slice: + if !ok { + vs = []string{opt.defaultValue} + } + return true, setSlice(vs, value, field) + case reflect.Array: + if !ok { + vs = []string{opt.defaultValue} + } + if len(vs) != value.Len() { + return false, fmt.Errorf("%q is not valid value for %s", vs, value.Type().String()) + } + return true, setArray(vs, value, field) + default: + var val string + if !ok { + val = opt.defaultValue + } + + if len(vs) > 0 { + val = vs[0] + } + return true, setWithProperType(val, value, field) } - return nil } -func setWithProperType(valueKind reflect.Kind, val string, structField reflect.Value) error { - switch valueKind { +func setWithProperType(val string, value reflect.Value, field reflect.StructField) error { + switch value.Kind() { case reflect.Int: - return setIntField(val, 0, structField) + return setIntField(val, 0, value) case reflect.Int8: - return setIntField(val, 8, structField) + return setIntField(val, 8, value) case reflect.Int16: - return setIntField(val, 16, structField) + return setIntField(val, 16, value) case reflect.Int32: - return setIntField(val, 32, structField) + return setIntField(val, 32, value) case reflect.Int64: - return setIntField(val, 64, structField) + switch value.Interface().(type) { + case time.Duration: + return setTimeDuration(val, value, field) + } + return setIntField(val, 64, value) case reflect.Uint: - return setUintField(val, 0, structField) + return setUintField(val, 0, value) case reflect.Uint8: - return setUintField(val, 8, structField) + return setUintField(val, 8, value) case reflect.Uint16: - return setUintField(val, 16, structField) + return setUintField(val, 16, value) case reflect.Uint32: - return setUintField(val, 32, structField) + return setUintField(val, 32, value) case reflect.Uint64: - return setUintField(val, 64, structField) + return setUintField(val, 64, value) case reflect.Bool: - return setBoolField(val, structField) + return setBoolField(val, value) case reflect.Float32: - return setFloatField(val, 32, structField) + return setFloatField(val, 32, value) case reflect.Float64: - return setFloatField(val, 64, structField) + return setFloatField(val, 64, value) case reflect.String: - structField.SetString(val) + value.SetString(val) + case reflect.Struct: + switch value.Interface().(type) { + case time.Time: + return setTimeField(val, field, value) + } + return json.Unmarshal(bytesconv.StringToBytes(val), value.Addr().Interface()) + case reflect.Map: + return json.Unmarshal(bytesconv.StringToBytes(val), value.Addr().Interface()) default: - return errors.New("Unknown type") + return errUnknownType } return nil } @@ -133,7 +263,7 @@ func setBoolField(val string, field reflect.Value) error { if err == nil { field.SetBool(boolVal) } - 
return nil + return err } func setFloatField(val string, bitSize int, field reflect.Value) error { @@ -150,7 +280,25 @@ func setFloatField(val string, bitSize int, field reflect.Value) error { func setTimeField(val string, structField reflect.StructField, value reflect.Value) error { timeFormat := structField.Tag.Get("time_format") if timeFormat == "" { - return errors.New("Blank time format") + timeFormat = time.RFC3339 + } + + switch tf := strings.ToLower(timeFormat); tf { + case "unix", "unixnano": + tv, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return err + } + + d := time.Duration(1) + if tf == "unixnano" { + d = time.Second + } + + t := time.Unix(tv/int64(d), tv%int64(d)) + value.Set(reflect.ValueOf(t)) + return nil + } if val == "" { @@ -163,6 +311,14 @@ func setTimeField(val string, structField reflect.StructField, value reflect.Val l = time.UTC } + if locTag := structField.Tag.Get("time_location"); locTag != "" { + loc, err := time.LoadLocation(locTag) + if err != nil { + return err + } + l = loc + } + t, err := time.ParseInLocation(timeFormat, val, l) if err != nil { return err @@ -172,11 +328,65 @@ func setTimeField(val string, structField reflect.StructField, value reflect.Val return nil } -// Don't pass in pointers to bind to. Can lead to bugs. See: -// https://github.com/codegangsta/martini-contrib/issues/40 -// https://github.com/codegangsta/martini-contrib/pull/34#issuecomment-29683659 -func ensureNotPointer(obj interface{}) { - if reflect.TypeOf(obj).Kind() == reflect.Ptr { - panic("Pointers are not accepted as binding models") +func setArray(vals []string, value reflect.Value, field reflect.StructField) error { + for i, s := range vals { + err := setWithProperType(s, value.Index(i), field) + if err != nil { + return err + } } + return nil +} + +func setSlice(vals []string, value reflect.Value, field reflect.StructField) error { + slice := reflect.MakeSlice(value.Type(), len(vals), len(vals)) + err := setArray(vals, slice, field) + if err != nil { + return err + } + value.Set(slice) + return nil +} + +func setTimeDuration(val string, value reflect.Value, field reflect.StructField) error { + d, err := time.ParseDuration(val) + if err != nil { + return err + } + value.Set(reflect.ValueOf(d)) + return nil +} + +func head(str, sep string) (head string, tail string) { + idx := strings.Index(str, sep) + if idx < 0 { + return str, "" + } + return str[:idx], str[idx+len(sep):] +} + +func setFormMap(ptr interface{}, form map[string][]string) error { + el := reflect.TypeOf(ptr).Elem() + + if el.Kind() == reflect.Slice { + ptrMap, ok := ptr.(map[string][]string) + if !ok { + return errors.New("cannot convert to map slices of strings") + } + for k, v := range form { + ptrMap[k] = v + } + + return nil + } + + ptrMap, ok := ptr.(map[string]string) + if !ok { + return errors.New("cannot convert to map of strings") + } + for k, v := range form { + ptrMap[k] = v[len(v)-1] // pick last + } + + return nil } diff --git a/vendor/github.com/gin-gonic/gin/binding/header.go b/vendor/github.com/gin-gonic/gin/binding/header.go new file mode 100644 index 000000000..179ce4ea2 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/binding/header.go @@ -0,0 +1,34 @@ +package binding + +import ( + "net/http" + "net/textproto" + "reflect" +) + +type headerBinding struct{} + +func (headerBinding) Name() string { + return "header" +} + +func (headerBinding) Bind(req *http.Request, obj interface{}) error { + + if err := mapHeader(obj, req.Header); err != nil { + return err + } + + return 
validate(obj) +} + +func mapHeader(ptr interface{}, h map[string][]string) error { + return mappingByPtr(ptr, headerSource(h), "header") +} + +type headerSource map[string][]string + +var _ setter = headerSource(nil) + +func (hs headerSource) TrySet(value reflect.Value, field reflect.StructField, tagValue string, opt setOptions) (isSetted bool, err error) { + return setByForm(value, field, hs, textproto.CanonicalMIMEHeaderKey(tagValue), opt) +} diff --git a/vendor/github.com/gin-gonic/gin/binding/json.go b/vendor/github.com/gin-gonic/gin/binding/json.go index 486b9733b..d62e07059 100644 --- a/vendor/github.com/gin-gonic/gin/binding/json.go +++ b/vendor/github.com/gin-gonic/gin/binding/json.go @@ -5,10 +5,25 @@ package binding import ( - "encoding/json" + "bytes" + "fmt" + "io" "net/http" + + "github.com/gin-gonic/gin/internal/json" ) +// EnableDecoderUseNumber is used to call the UseNumber method on the JSON +// Decoder instance. UseNumber causes the Decoder to unmarshal a number into an +// interface{} as a Number instead of as a float64. +var EnableDecoderUseNumber = false + +// EnableDecoderDisallowUnknownFields is used to call the DisallowUnknownFields method +// on the JSON Decoder instance. DisallowUnknownFields causes the Decoder to +// return an error when the destination is a struct and the input contains object +// keys which do not match any non-ignored, exported fields in the destination. +var EnableDecoderDisallowUnknownFields = false + type jsonBinding struct{} func (jsonBinding) Name() string { @@ -16,7 +31,24 @@ func (jsonBinding) Name() string { } func (jsonBinding) Bind(req *http.Request, obj interface{}) error { - decoder := json.NewDecoder(req.Body) + if req == nil || req.Body == nil { + return fmt.Errorf("invalid request") + } + return decodeJSON(req.Body, obj) +} + +func (jsonBinding) BindBody(body []byte, obj interface{}) error { + return decodeJSON(bytes.NewReader(body), obj) +} + +func decodeJSON(r io.Reader, obj interface{}) error { + decoder := json.NewDecoder(r) + if EnableDecoderUseNumber { + decoder.UseNumber() + } + if EnableDecoderDisallowUnknownFields { + decoder.DisallowUnknownFields() + } if err := decoder.Decode(obj); err != nil { return err } diff --git a/vendor/github.com/gin-gonic/gin/binding/msgpack.go b/vendor/github.com/gin-gonic/gin/binding/msgpack.go index 69367175b..2a442996a 100644 --- a/vendor/github.com/gin-gonic/gin/binding/msgpack.go +++ b/vendor/github.com/gin-gonic/gin/binding/msgpack.go @@ -2,9 +2,14 @@ // Use of this source code is governed by a MIT style // license that can be found in the LICENSE file. 
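EnableDecoderUseNumber and EnableDecoderDisallowUnknownFields above are plain package-level switches, so they are normally flipped once at startup. A hedged sketch, with an illustrative /orders route and the usual gin, binding and net/http imports assumed:

func main() {
	// Decode numbers into json.Number instead of float64 when the target is interface{}.
	binding.EnableDecoderUseNumber = true
	// Reject bodies that carry fields the destination struct does not declare.
	binding.EnableDecoderDisallowUnknownFields = true

	r := gin.Default()
	r.POST("/orders", func(c *gin.Context) {
		var o struct {
			ID    string `json:"id" binding:"required"`
			Count int    `json:"count"`
		}
		if err := c.ShouldBindJSON(&o); err != nil {
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		c.JSON(http.StatusOK, o)
	})
	r.Run(":8080")
}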
+//go:build !nomsgpack +// +build !nomsgpack + package binding import ( + "bytes" + "io" "net/http" "github.com/ugorji/go/codec" @@ -17,12 +22,17 @@ func (msgpackBinding) Name() string { } func (msgpackBinding) Bind(req *http.Request, obj interface{}) error { + return decodeMsgPack(req.Body, obj) +} - if err := codec.NewDecoder(req.Body, new(codec.MsgpackHandle)).Decode(&obj); err != nil { - //var decoder *codec.Decoder = codec.NewDecoder(req.Body, &codec.MsgpackHandle) - //if err := decoder.Decode(&obj); err != nil { +func (msgpackBinding) BindBody(body []byte, obj interface{}) error { + return decodeMsgPack(bytes.NewReader(body), obj) +} + +func decodeMsgPack(r io.Reader, obj interface{}) error { + cdc := new(codec.MsgpackHandle) + if err := codec.NewDecoder(r, cdc).Decode(&obj); err != nil { return err } return validate(obj) - } diff --git a/vendor/github.com/gin-gonic/gin/binding/multipart_form_mapping.go b/vendor/github.com/gin-gonic/gin/binding/multipart_form_mapping.go new file mode 100644 index 000000000..f85a1aa60 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/binding/multipart_form_mapping.go @@ -0,0 +1,66 @@ +// Copyright 2019 Gin Core Team. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package binding + +import ( + "errors" + "mime/multipart" + "net/http" + "reflect" +) + +type multipartRequest http.Request + +var _ setter = (*multipartRequest)(nil) + +// TrySet tries to set a value by the multipart request with the binding a form file +func (r *multipartRequest) TrySet(value reflect.Value, field reflect.StructField, key string, opt setOptions) (isSetted bool, err error) { + if files := r.MultipartForm.File[key]; len(files) != 0 { + return setByMultipartFormFile(value, field, files) + } + + return setByForm(value, field, r.MultipartForm.Value, key, opt) +} + +func setByMultipartFormFile(value reflect.Value, field reflect.StructField, files []*multipart.FileHeader) (isSetted bool, err error) { + switch value.Kind() { + case reflect.Ptr: + switch value.Interface().(type) { + case *multipart.FileHeader: + value.Set(reflect.ValueOf(files[0])) + return true, nil + } + case reflect.Struct: + switch value.Interface().(type) { + case multipart.FileHeader: + value.Set(reflect.ValueOf(*files[0])) + return true, nil + } + case reflect.Slice: + slice := reflect.MakeSlice(value.Type(), len(files), len(files)) + isSetted, err = setArrayOfMultipartFormFiles(slice, field, files) + if err != nil || !isSetted { + return isSetted, err + } + value.Set(slice) + return true, nil + case reflect.Array: + return setArrayOfMultipartFormFiles(value, field, files) + } + return false, errors.New("unsupported field type for multipart.FileHeader") +} + +func setArrayOfMultipartFormFiles(value reflect.Value, field reflect.StructField, files []*multipart.FileHeader) (isSetted bool, err error) { + if value.Len() != len(files) { + return false, errors.New("unsupported len of array for []*multipart.FileHeader") + } + for i := range files { + setted, err := setByMultipartFormFile(value.Index(i), field, files[i:i+1]) + if err != nil || !setted { + return setted, err + } + } + return true, nil +} diff --git a/vendor/github.com/gin-gonic/gin/binding/protobuf.go b/vendor/github.com/gin-gonic/gin/binding/protobuf.go index c7eb84e9b..f9ece928d 100644 --- a/vendor/github.com/gin-gonic/gin/binding/protobuf.go +++ b/vendor/github.com/gin-gonic/gin/binding/protobuf.go @@ -17,19 +17,20 @@ func (protobufBinding) Name() string { return 
"protobuf" } -func (protobufBinding) Bind(req *http.Request, obj interface{}) error { - +func (b protobufBinding) Bind(req *http.Request, obj interface{}) error { buf, err := ioutil.ReadAll(req.Body) if err != nil { return err } + return b.BindBody(buf, obj) +} - if err = proto.Unmarshal(buf, obj.(proto.Message)); err != nil { +func (protobufBinding) BindBody(body []byte, obj interface{}) error { + if err := proto.Unmarshal(body, obj.(proto.Message)); err != nil { return err } - - //Here it's same to return validate(obj), but util now we cann't add `binding:""` to the struct - //which automatically generate by gen-proto + // Here it's same to return validate(obj), but util now we can't add + // `binding:""` to the struct which automatically generate by gen-proto return nil - //return validate(obj) + // return validate(obj) } diff --git a/vendor/github.com/gin-gonic/gin/binding/query.go b/vendor/github.com/gin-gonic/gin/binding/query.go new file mode 100644 index 000000000..219743f2a --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/binding/query.go @@ -0,0 +1,21 @@ +// Copyright 2017 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package binding + +import "net/http" + +type queryBinding struct{} + +func (queryBinding) Name() string { + return "query" +} + +func (queryBinding) Bind(req *http.Request, obj interface{}) error { + values := req.URL.Query() + if err := mapForm(obj, values); err != nil { + return err + } + return validate(obj) +} diff --git a/vendor/github.com/gin-gonic/gin/binding/uri.go b/vendor/github.com/gin-gonic/gin/binding/uri.go new file mode 100644 index 000000000..f91ec3819 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/binding/uri.go @@ -0,0 +1,18 @@ +// Copyright 2018 Gin Core Team. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package binding + +type uriBinding struct{} + +func (uriBinding) Name() string { + return "uri" +} + +func (uriBinding) BindUri(m map[string][]string, obj interface{}) error { + if err := mapUri(obj, m); err != nil { + return err + } + return validate(obj) +} diff --git a/vendor/github.com/gin-gonic/gin/binding/xml.go b/vendor/github.com/gin-gonic/gin/binding/xml.go index f84a6b7f1..4e9011496 100644 --- a/vendor/github.com/gin-gonic/gin/binding/xml.go +++ b/vendor/github.com/gin-gonic/gin/binding/xml.go @@ -5,7 +5,9 @@ package binding import ( + "bytes" "encoding/xml" + "io" "net/http" ) @@ -16,7 +18,14 @@ func (xmlBinding) Name() string { } func (xmlBinding) Bind(req *http.Request, obj interface{}) error { - decoder := xml.NewDecoder(req.Body) + return decodeXML(req.Body, obj) +} + +func (xmlBinding) BindBody(body []byte, obj interface{}) error { + return decodeXML(bytes.NewReader(body), obj) +} +func decodeXML(r io.Reader, obj interface{}) error { + decoder := xml.NewDecoder(r) if err := decoder.Decode(obj); err != nil { return err } diff --git a/vendor/github.com/gin-gonic/gin/binding/yaml.go b/vendor/github.com/gin-gonic/gin/binding/yaml.go new file mode 100644 index 000000000..a2d36d6a5 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/binding/yaml.go @@ -0,0 +1,35 @@ +// Copyright 2018 Gin Core Team. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +package binding + +import ( + "bytes" + "io" + "net/http" + + "gopkg.in/yaml.v2" +) + +type yamlBinding struct{} + +func (yamlBinding) Name() string { + return "yaml" +} + +func (yamlBinding) Bind(req *http.Request, obj interface{}) error { + return decodeYAML(req.Body, obj) +} + +func (yamlBinding) BindBody(body []byte, obj interface{}) error { + return decodeYAML(bytes.NewReader(body), obj) +} + +func decodeYAML(r io.Reader, obj interface{}) error { + decoder := yaml.NewDecoder(r) + if err := decoder.Decode(obj); err != nil { + return err + } + return validate(obj) +} diff --git a/vendor/github.com/gin-gonic/gin/context.go b/vendor/github.com/gin-gonic/gin/context.go index 5c4d27dca..dc03c358a 100644 --- a/vendor/github.com/gin-gonic/gin/context.go +++ b/vendor/github.com/gin-gonic/gin/context.go @@ -6,6 +6,7 @@ package gin import ( "errors" + "fmt" "io" "io/ioutil" "math" @@ -13,7 +14,9 @@ import ( "net" "net/http" "net/url" + "os" "strings" + "sync" "time" "github.com/gin-contrib/sse" @@ -21,7 +24,7 @@ import ( "github.com/gin-gonic/gin/render" ) -// Content-Type MIME of the most common data formats +// Content-Type MIME of the most common data formats. const ( MIMEJSON = binding.MIMEJSON MIMEHTML = binding.MIMEHTML @@ -30,12 +33,13 @@ const ( MIMEPlain = binding.MIMEPlain MIMEPOSTForm = binding.MIMEPOSTForm MIMEMultipartPOSTForm = binding.MIMEMultipartPOSTForm + MIMEYAML = binding.MIMEYAML ) -const ( - defaultMemory = 32 << 20 // 32 MB - abortIndex int8 = math.MaxInt8 / 2 -) +// BodyBytesKey indicates a default body bytes key. +const BodyBytesKey = "_gin-gonic/gin/bodybyteskey" + +const abortIndex int8 = math.MaxInt8 / 2 // Context is the most important part of gin. It allows us to pass variables between middleware, // manage the flow, validate the JSON of a request and render a JSON response for example. @@ -47,11 +51,33 @@ type Context struct { Params Params handlers HandlersChain index int8 + fullPath string + + engine *Engine + params *Params + + // This mutex protect Keys map + mu sync.RWMutex - engine *Engine - Keys map[string]interface{} - Errors errorMsgs + // Keys is a key/value pair exclusively for the context of each request. + Keys map[string]interface{} + + // Errors is a list of errors attached to all the handlers/middlewares who used this context. + Errors errorMsgs + + // Accepted defines a list of manually accepted formats for content negotiation. Accepted []string + + // queryCache use url.ParseQuery cached the param query result from c.Request.URL.Query() + queryCache url.Values + + // formCache use url.ParseQuery cached PostForm contains the parsed form data from POST, PATCH, + // or PUT body parameters. + formCache url.Values + + // SameSite allows a server to define a cookie attribute making it impossible for + // the browser to send this cookie along with cross-site requests. + sameSite http.SameSite } /************************************/ @@ -63,33 +89,69 @@ func (c *Context) reset() { c.Params = c.Params[0:0] c.handlers = nil c.index = -1 + + c.fullPath = "" c.Keys = nil c.Errors = c.Errors[0:0] c.Accepted = nil + c.queryCache = nil + c.formCache = nil + *c.params = (*c.params)[0:0] } // Copy returns a copy of the current context that can be safely used outside the request's scope. // This has to be used when the context has to be passed to a goroutine. 
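Copy's doc comment above describes the goroutine pattern; the implementation that follows clones Keys and Params so the snapshot survives the request. A small sketch, assuming the usual imports and an illustrative handler:

func slowLog(c *gin.Context) {
	// Take a copy before leaving the request's scope; the original context
	// is reused by gin's pool as soon as this handler returns.
	cp := c.Copy()
	go func() {
		time.Sleep(2 * time.Second)
		// Only the copy is safe to read here.
		log.Printf("done with %s", cp.Request.URL.Path)
	}()
	c.Status(http.StatusAccepted)
}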
func (c *Context) Copy() *Context { - var cp = *c + cp := Context{ + writermem: c.writermem, + Request: c.Request, + Params: c.Params, + engine: c.engine, + } cp.writermem.ResponseWriter = nil cp.Writer = &cp.writermem cp.index = abortIndex cp.handlers = nil + cp.Keys = map[string]interface{}{} + for k, v := range c.Keys { + cp.Keys[k] = v + } + paramCopy := make([]Param, len(cp.Params)) + copy(paramCopy, cp.Params) + cp.Params = paramCopy return &cp } -// HandlerName returns the main handler's name. For example if the handler is "handleGetUsers()", this -// function will return "main.handleGetUsers" +// HandlerName returns the main handler's name. For example if the handler is "handleGetUsers()", +// this function will return "main.handleGetUsers". func (c *Context) HandlerName() string { return nameOfFunction(c.handlers.Last()) } +// HandlerNames returns a list of all registered handlers for this context in descending order, +// following the semantics of HandlerName() +func (c *Context) HandlerNames() []string { + hn := make([]string, 0, len(c.handlers)) + for _, val := range c.handlers { + hn = append(hn, nameOfFunction(val)) + } + return hn +} + // Handler returns the main handler. func (c *Context) Handler() HandlerFunc { return c.handlers.Last() } +// FullPath returns a matched route full path. For not found routes +// returns an empty string. +// router.GET("/user/:id", func(c *gin.Context) { +// c.FullPath() == "/user/:id" // true +// }) +func (c *Context) FullPath() string { + return c.fullPath +} + /************************************/ /*********** FLOW CONTROL ***********/ /************************************/ @@ -99,9 +161,9 @@ func (c *Context) Handler() HandlerFunc { // See example in GitHub. func (c *Context) Next() { c.index++ - s := int8(len(c.handlers)) - for ; c.index < s; c.index++ { + for c.index < int8(len(c.handlers)) { c.handlers[c.index](c) + c.index++ } } @@ -111,8 +173,8 @@ func (c *Context) IsAborted() bool { } // Abort prevents pending handlers from being called. Note that this will not stop the current handler. -// Let's say you have an authorization middleware that validates that the current request is authorized. If the -// authorization fails (ex: the password does not match), call Abort to ensure the remaining handlers +// Let's say you have an authorization middleware that validates that the current request is authorized. +// If the authorization fails (ex: the password does not match), call Abort to ensure the remaining handlers // for this request are not called. func (c *Context) Abort() { c.index = abortIndex @@ -126,15 +188,16 @@ func (c *Context) AbortWithStatus(code int) { c.Abort() } -// AbortWithStatusJSON calls `Abort()` and then `JSON` internally. This method stops the chain, writes the status code and return a JSON body +// AbortWithStatusJSON calls `Abort()` and then `JSON` internally. +// This method stops the chain, writes the status code and return a JSON body. // It also sets the Content-Type as "application/json". func (c *Context) AbortWithStatusJSON(code int, jsonObj interface{}) { c.Abort() c.JSON(code, jsonObj) } -// AbortWithError calls `AbortWithStatus()` and `Error()` internally. This method stops the chain, writes the status code and -// pushes the specified error to `c.Errors`. +// AbortWithError calls `AbortWithStatus()` and `Error()` internally. +// This method stops the chain, writes the status code and pushes the specified error to `c.Errors`. // See Context.Error() for more details. 
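The Abort helpers above exist mainly for middleware such as the authorization example in the comments. A minimal sketch of that pattern, with an illustrative header check:

func requireToken() gin.HandlerFunc {
	return func(c *gin.Context) {
		if c.GetHeader("Authorization") == "" {
			// Skips the remaining handlers and writes the JSON body in one call.
			c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "missing token"})
			return
		}
		c.Next()
	}
}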
func (c *Context) AbortWithError(code int, err error) *Error { c.AbortWithStatus(code) @@ -145,21 +208,24 @@ func (c *Context) AbortWithError(code int, err error) *Error { /********* ERROR MANAGEMENT *********/ /************************************/ -// Attaches an error to the current context. The error is pushed to a list of errors. +// Error attaches an error to the current context. The error is pushed to a list of errors. // It's a good idea to call Error for each error that occurred during the resolution of a request. -// A middleware can be used to collect all the errors -// and push them to a database together, print a log, or append it in the HTTP response. +// A middleware can be used to collect all the errors and push them to a database together, +// print a log, or append it in the HTTP response. +// Error will panic if err is nil. func (c *Context) Error(err error) *Error { - var parsedError *Error - switch err.(type) { - case *Error: - parsedError = err.(*Error) - default: + if err == nil { + panic("err is nil") + } + + parsedError, ok := err.(*Error) + if !ok { parsedError = &Error{ Err: err, Type: ErrorTypePrivate, } } + c.Errors = append(c.Errors, parsedError) return parsedError } @@ -171,16 +237,21 @@ func (c *Context) Error(err error) *Error { // Set is used to store a new key/value pair exclusively for this context. // It also lazy initializes c.Keys if it was not used previously. func (c *Context) Set(key string, value interface{}) { + c.mu.Lock() if c.Keys == nil { c.Keys = make(map[string]interface{}) } + c.Keys[key] = value + c.mu.Unlock() } // Get returns the value for the given key, ie: (value, true). // If the value does not exists it returns (nil, false) func (c *Context) Get(key string) (value interface{}, exists bool) { + c.mu.RLock() value, exists = c.Keys[key] + c.mu.RUnlock() return } @@ -224,6 +295,22 @@ func (c *Context) GetInt64(key string) (i64 int64) { return } +// GetUint returns the value associated with the key as an unsigned integer. +func (c *Context) GetUint(key string) (ui uint) { + if val, ok := c.Get(key); ok && val != nil { + ui, _ = val.(uint) + } + return +} + +// GetUint64 returns the value associated with the key as an unsigned integer. +func (c *Context) GetUint64(key string) (ui64 uint64) { + if val, ok := c.Get(key); ok && val != nil { + ui64, _ = val.(uint64) + } + return +} + // GetFloat64 returns the value associated with the key as a float64. func (c *Context) GetFloat64(key string) (f64 float64) { if val, ok := c.Get(key); ok && val != nil { @@ -286,10 +373,10 @@ func (c *Context) GetStringMapStringSlice(key string) (smss map[string][]string) // Param returns the value of the URL param. // It is a shortcut for c.Params.ByName(key) -// router.GET("/user/:id", func(c *gin.Context) { -// // a GET request to /user/john -// id := c.Param("id") // id == "john" -// }) +// router.GET("/user/:id", func(c *gin.Context) { +// // a GET request to /user/john +// id := c.Param("id") // id == "john" +// }) func (c *Context) Param(key string) string { return c.Params.ByName(key) } @@ -297,11 +384,11 @@ func (c *Context) Param(key string) string { // Query returns the keyed url query value if it exists, // otherwise it returns an empty string `("")`. 
// It is shortcut for `c.Request.URL.Query().Get(key)` -// GET /path?id=1234&name=Manu&value= -// c.Query("id") == "1234" -// c.Query("name") == "Manu" -// c.Query("value") == "" -// c.Query("wtf") == "" +// GET /path?id=1234&name=Manu&value= +// c.Query("id") == "1234" +// c.Query("name") == "Manu" +// c.Query("value") == "" +// c.Query("wtf") == "" func (c *Context) Query(key string) string { value, _ := c.GetQuery(key) return value @@ -310,10 +397,10 @@ func (c *Context) Query(key string) string { // DefaultQuery returns the keyed url query value if it exists, // otherwise it returns the specified defaultValue string. // See: Query() and GetQuery() for further information. -// GET /?name=Manu&lastname= -// c.DefaultQuery("name", "unknown") == "Manu" -// c.DefaultQuery("id", "none") == "none" -// c.DefaultQuery("lastname", "none") == "" +// GET /?name=Manu&lastname= +// c.DefaultQuery("name", "unknown") == "Manu" +// c.DefaultQuery("id", "none") == "none" +// c.DefaultQuery("lastname", "none") == "" func (c *Context) DefaultQuery(key, defaultValue string) string { if value, ok := c.GetQuery(key); ok { return value @@ -325,10 +412,10 @@ func (c *Context) DefaultQuery(key, defaultValue string) string { // if it exists `(value, true)` (even when the value is an empty string), // otherwise it returns `("", false)`. // It is shortcut for `c.Request.URL.Query().Get(key)` -// GET /?name=Manu&lastname= -// ("Manu", true) == c.GetQuery("name") -// ("", false) == c.GetQuery("id") -// ("", true) == c.GetQuery("lastname") +// GET /?name=Manu&lastname= +// ("Manu", true) == c.GetQuery("name") +// ("", false) == c.GetQuery("id") +// ("", true) == c.GetQuery("lastname") func (c *Context) GetQuery(key string) (string, bool) { if values, ok := c.GetQueryArray(key); ok { return values[0], ok @@ -343,16 +430,39 @@ func (c *Context) QueryArray(key string) []string { return values } +func (c *Context) initQueryCache() { + if c.queryCache == nil { + if c.Request != nil { + c.queryCache = c.Request.URL.Query() + } else { + c.queryCache = url.Values{} + } + } +} + // GetQueryArray returns a slice of strings for a given query key, plus // a boolean value whether at least one value exists for the given key. func (c *Context) GetQueryArray(key string) ([]string, bool) { - req := c.Request - if values, ok := req.URL.Query()[key]; ok && len(values) > 0 { + c.initQueryCache() + if values, ok := c.queryCache[key]; ok && len(values) > 0 { return values, true } return []string{}, false } +// QueryMap returns a map for a given query key. +func (c *Context) QueryMap(key string) map[string]string { + dicts, _ := c.GetQueryMap(key) + return dicts +} + +// GetQueryMap returns a map for a given query key, plus a boolean value +// whether at least one value exists for the given key. +func (c *Context) GetQueryMap(key string) (map[string]string, bool) { + c.initQueryCache() + return c.get(c.queryCache, key) +} + // PostForm returns the specified key from a POST urlencoded form or multipart form // when it exists, otherwise it returns an empty string `("")`. func (c *Context) PostForm(key string) string { @@ -374,9 +484,9 @@ func (c *Context) DefaultPostForm(key, defaultValue string) string { // form or multipart form when it exists `(value, true)` (even when the value is an empty string), // otherwise it returns ("", false). 
// For example, during a PATCH request to update the user's email: -// email=mail@example.com --> ("mail@example.com", true) := GetPostForm("email") // set email to "mail@example.com" -// email= --> ("", true) := GetPostForm("email") // set email to "" -// --> ("", false) := GetPostForm("email") // do nothing with email +// email=mail@example.com --> ("mail@example.com", true) := GetPostForm("email") // set email to "mail@example.com" +// email= --> ("", true) := GetPostForm("email") // set email to "" +// --> ("", false) := GetPostForm("email") // do nothing with email func (c *Context) GetPostForm(key string) (string, bool) { if values, ok := c.GetPostFormArray(key); ok { return values[0], ok @@ -391,101 +501,304 @@ func (c *Context) PostFormArray(key string) []string { return values } +func (c *Context) initFormCache() { + if c.formCache == nil { + c.formCache = make(url.Values) + req := c.Request + if err := req.ParseMultipartForm(c.engine.MaxMultipartMemory); err != nil { + if err != http.ErrNotMultipart { + debugPrint("error on parse multipart form array: %v", err) + } + } + c.formCache = req.PostForm + } +} + // GetPostFormArray returns a slice of strings for a given form key, plus // a boolean value whether at least one value exists for the given key. func (c *Context) GetPostFormArray(key string) ([]string, bool) { - req := c.Request - req.ParseForm() - req.ParseMultipartForm(defaultMemory) - if values := req.PostForm[key]; len(values) > 0 { + c.initFormCache() + if values := c.formCache[key]; len(values) > 0 { return values, true } - if req.MultipartForm != nil && req.MultipartForm.File != nil { - if values := req.MultipartForm.Value[key]; len(values) > 0 { - return values, true + return []string{}, false +} + +// PostFormMap returns a map for a given form key. +func (c *Context) PostFormMap(key string) map[string]string { + dicts, _ := c.GetPostFormMap(key) + return dicts +} + +// GetPostFormMap returns a map for a given form key, plus a boolean value +// whether at least one value exists for the given key. +func (c *Context) GetPostFormMap(key string) (map[string]string, bool) { + c.initFormCache() + return c.get(c.formCache, key) +} + +// get is an internal method and returns a map which satisfy conditions. +func (c *Context) get(m map[string][]string, key string) (map[string]string, bool) { + dicts := make(map[string]string) + exist := false + for k, v := range m { + if i := strings.IndexByte(k, '['); i >= 1 && k[0:i] == key { + if j := strings.IndexByte(k[i+1:], ']'); j >= 1 { + exist = true + dicts[k[i+1:][:j]] = v[0] + } } } - return []string{}, false + return dicts, exist } // FormFile returns the first file for the provided form key. func (c *Context) FormFile(name string) (*multipart.FileHeader, error) { - _, fh, err := c.Request.FormFile(name) + if c.Request.MultipartForm == nil { + if err := c.Request.ParseMultipartForm(c.engine.MaxMultipartMemory); err != nil { + return nil, err + } + } + f, fh, err := c.Request.FormFile(name) + if err != nil { + return nil, err + } + f.Close() return fh, err } // MultipartForm is the parsed multipart form, including file uploads. func (c *Context) MultipartForm() (*multipart.Form, error) { - err := c.Request.ParseMultipartForm(defaultMemory) + err := c.Request.ParseMultipartForm(c.engine.MaxMultipartMemory) return c.Request.MultipartForm, err } +// SaveUploadedFile uploads the form file to specific dst. 
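The get helper above is what parses key[subkey] pairs for QueryMap and GetPostFormMap. A sketch with invented route and parameter names:

// GET /colors?tags[fg]=red&tags[bg]=blue
func colors(c *gin.Context) {
	tags := c.QueryMap("tags") // map[string]string{"fg": "red", "bg": "blue"}
	c.JSON(http.StatusOK, tags)
}

// POST form fields names[first]=Ada&names[last]=Lovelace
func names(c *gin.Context) {
	c.JSON(http.StatusOK, c.PostFormMap("names"))
}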
+func (c *Context) SaveUploadedFile(file *multipart.FileHeader, dst string) error { + src, err := file.Open() + if err != nil { + return err + } + defer src.Close() + + out, err := os.Create(dst) + if err != nil { + return err + } + defer out.Close() + + _, err = io.Copy(out, src) + return err +} + // Bind checks the Content-Type to select a binding engine automatically, // Depending the "Content-Type" header different bindings are used: -// "application/json" --> JSON binding -// "application/xml" --> XML binding -// otherwise --> returns an error +// "application/json" --> JSON binding +// "application/xml" --> XML binding +// otherwise --> returns an error. // It parses the request's body as JSON if Content-Type == "application/json" using JSON or XML as a JSON input. // It decodes the json payload into the struct specified as a pointer. -// Like ParseBody() but this method also writes a 400 error if the json is not valid. +// It writes a 400 error and sets Content-Type header "text/plain" in the response if input is not valid. func (c *Context) Bind(obj interface{}) error { b := binding.Default(c.Request.Method, c.ContentType()) return c.MustBindWith(obj, b) } -// BindJSON is a shortcut for c.MustBindWith(obj, binding.JSON) +// BindJSON is a shortcut for c.MustBindWith(obj, binding.JSON). func (c *Context) BindJSON(obj interface{}) error { return c.MustBindWith(obj, binding.JSON) } -// MustBindWith binds the passed struct pointer using the specified binding -// engine. It will abort the request with HTTP 400 if any error ocurrs. +// BindXML is a shortcut for c.MustBindWith(obj, binding.BindXML). +func (c *Context) BindXML(obj interface{}) error { + return c.MustBindWith(obj, binding.XML) +} + +// BindQuery is a shortcut for c.MustBindWith(obj, binding.Query). +func (c *Context) BindQuery(obj interface{}) error { + return c.MustBindWith(obj, binding.Query) +} + +// BindYAML is a shortcut for c.MustBindWith(obj, binding.YAML). +func (c *Context) BindYAML(obj interface{}) error { + return c.MustBindWith(obj, binding.YAML) +} + +// BindHeader is a shortcut for c.MustBindWith(obj, binding.Header). +func (c *Context) BindHeader(obj interface{}) error { + return c.MustBindWith(obj, binding.Header) +} + +// BindUri binds the passed struct pointer using binding.Uri. +// It will abort the request with HTTP 400 if any error occurs. +func (c *Context) BindUri(obj interface{}) error { + if err := c.ShouldBindUri(obj); err != nil { + c.AbortWithError(http.StatusBadRequest, err).SetType(ErrorTypeBind) // nolint: errcheck + return err + } + return nil +} + +// MustBindWith binds the passed struct pointer using the specified binding engine. +// It will abort the request with HTTP 400 if any error occurs. // See the binding package. 
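SaveUploadedFile above is just Open, os.Create and io.Copy, and FormFile now respects the engine's MaxMultipartMemory instead of a hard-coded constant. A hedged upload handler using both, with illustrative paths and the usual imports assumed:

func main() {
	r := gin.Default()
	r.MaxMultipartMemory = 8 << 20 // 8 MB
	r.POST("/upload", func(c *gin.Context) {
		file, err := c.FormFile("file")
		if err != nil {
			c.String(http.StatusBadRequest, "get form file: %v", err)
			return
		}
		if err := c.SaveUploadedFile(file, "./uploads/"+file.Filename); err != nil {
			c.String(http.StatusInternalServerError, "save file: %v", err)
			return
		}
		c.String(http.StatusOK, "uploaded %s", file.Filename)
	})
	r.Run(":8080")
}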
-func (c *Context) MustBindWith(obj interface{}, b binding.Binding) (err error) { - if err = c.ShouldBindWith(obj, b); err != nil { - c.AbortWithError(400, err).SetType(ErrorTypeBind) +func (c *Context) MustBindWith(obj interface{}, b binding.Binding) error { + if err := c.ShouldBindWith(obj, b); err != nil { + c.AbortWithError(http.StatusBadRequest, err).SetType(ErrorTypeBind) // nolint: errcheck + return err } + return nil +} - return +// ShouldBind checks the Content-Type to select a binding engine automatically, +// Depending the "Content-Type" header different bindings are used: +// "application/json" --> JSON binding +// "application/xml" --> XML binding +// otherwise --> returns an error +// It parses the request's body as JSON if Content-Type == "application/json" using JSON or XML as a JSON input. +// It decodes the json payload into the struct specified as a pointer. +// Like c.Bind() but this method does not set the response status code to 400 and abort if the json is not valid. +func (c *Context) ShouldBind(obj interface{}) error { + b := binding.Default(c.Request.Method, c.ContentType()) + return c.ShouldBindWith(obj, b) +} + +// ShouldBindJSON is a shortcut for c.ShouldBindWith(obj, binding.JSON). +func (c *Context) ShouldBindJSON(obj interface{}) error { + return c.ShouldBindWith(obj, binding.JSON) +} + +// ShouldBindXML is a shortcut for c.ShouldBindWith(obj, binding.XML). +func (c *Context) ShouldBindXML(obj interface{}) error { + return c.ShouldBindWith(obj, binding.XML) +} + +// ShouldBindQuery is a shortcut for c.ShouldBindWith(obj, binding.Query). +func (c *Context) ShouldBindQuery(obj interface{}) error { + return c.ShouldBindWith(obj, binding.Query) +} + +// ShouldBindYAML is a shortcut for c.ShouldBindWith(obj, binding.YAML). +func (c *Context) ShouldBindYAML(obj interface{}) error { + return c.ShouldBindWith(obj, binding.YAML) +} + +// ShouldBindHeader is a shortcut for c.ShouldBindWith(obj, binding.Header). +func (c *Context) ShouldBindHeader(obj interface{}) error { + return c.ShouldBindWith(obj, binding.Header) +} + +// ShouldBindUri binds the passed struct pointer using the specified binding engine. +func (c *Context) ShouldBindUri(obj interface{}) error { + m := make(map[string][]string) + for _, v := range c.Params { + m[v.Key] = []string{v.Value} + } + return binding.Uri.BindUri(m, obj) } -// ShouldBindWith binds the passed struct pointer using the specified binding -// engine. +// ShouldBindWith binds the passed struct pointer using the specified binding engine. // See the binding package. func (c *Context) ShouldBindWith(obj interface{}, b binding.Binding) error { return b.Bind(c.Request, obj) } -// ClientIP implements a best effort algorithm to return the real client IP, it parses -// X-Real-IP and X-Forwarded-For in order to work properly with reverse-proxies such us: nginx or haproxy. -// Use X-Forwarded-For before X-Real-Ip as nginx uses X-Real-Ip with the proxy's IP. -func (c *Context) ClientIP() string { - if c.engine.ForwardedByClientIP { - clientIP := c.requestHeader("X-Forwarded-For") - if index := strings.IndexByte(clientIP, ','); index >= 0 { - clientIP = clientIP[0:index] - } - clientIP = strings.TrimSpace(clientIP) - if len(clientIP) > 0 { - return clientIP +// ShouldBindBodyWith is similar with ShouldBindWith, but it stores the request +// body into the context, and reuse when it is called again. +// +// NOTE: This method reads the body before binding. So you should use +// ShouldBindWith for better performance if you need to call only once. 
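ShouldBindQuery and ShouldBindUri above pair naturally with the tag options added in the form_mapping.go rewrite earlier in this diff (default values, time.Duration, time_format/time_location). A sketch with made-up field and route names:

type SearchParams struct {
	Limit   int           `form:"limit,default=25"`         // falls back to 25 when absent
	Timeout time.Duration `form:"timeout,default=30s"`      // parsed with time.ParseDuration
	Since   time.Time     `form:"since" time_format:"unix"` // unix seconds; "unixnano" also works
	Until   time.Time     `form:"until" time_format:"2006-01-02" time_location:"UTC"`
}

type UserURI struct {
	ID string `uri:"id" binding:"required"`
}

func routes(r *gin.Engine) {
	r.GET("/users/:id/search", func(c *gin.Context) {
		var u UserURI
		var p SearchParams
		if err := c.ShouldBindUri(&u); err != nil {
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		if err := c.ShouldBindQuery(&p); err != nil {
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		c.JSON(http.StatusOK, gin.H{"user": u.ID, "params": p})
	})
}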
+func (c *Context) ShouldBindBodyWith(obj interface{}, bb binding.BindingBody) (err error) { + var body []byte + if cb, ok := c.Get(BodyBytesKey); ok { + if cbb, ok := cb.([]byte); ok { + body = cbb } - clientIP = strings.TrimSpace(c.requestHeader("X-Real-Ip")) - if len(clientIP) > 0 { - return clientIP + } + if body == nil { + body, err = ioutil.ReadAll(c.Request.Body) + if err != nil { + return err } + c.Set(BodyBytesKey, body) } + return bb.BindBody(body, obj) +} +// ClientIP implements a best effort algorithm to return the real client IP. +// It called c.RemoteIP() under the hood, to check if the remote IP is a trusted proxy or not. +// If it's it will then try to parse the headers defined in Engine.RemoteIPHeaders (defaulting to [X-Forwarded-For, X-Real-Ip]). +// If the headers are nots syntactically valid OR the remote IP does not correspong to a trusted proxy, +// the remote IP (coming form Request.RemoteAddr) is returned. +func (c *Context) ClientIP() string { if c.engine.AppEngine { - if addr := c.Request.Header.Get("X-Appengine-Remote-Addr"); addr != "" { + if addr := c.requestHeader("X-Appengine-Remote-Addr"); addr != "" { return addr } } - if ip, _, err := net.SplitHostPort(strings.TrimSpace(c.Request.RemoteAddr)); err == nil { - return ip + remoteIP, trusted := c.RemoteIP() + if remoteIP == nil { + return "" } - return "" + if trusted && c.engine.ForwardedByClientIP && c.engine.RemoteIPHeaders != nil { + for _, headerName := range c.engine.RemoteIPHeaders { + ip, valid := validateHeader(c.requestHeader(headerName)) + if valid { + return ip + } + } + } + return remoteIP.String() +} + +// RemoteIP parses the IP from Request.RemoteAddr, normalizes and returns the IP (without the port). +// It also checks if the remoteIP is a trusted proxy or not. +// In order to perform this validation, it will see if the IP is contained within at least one of the CIDR blocks +// defined in Engine.TrustedProxies +func (c *Context) RemoteIP() (net.IP, bool) { + ip, _, err := net.SplitHostPort(strings.TrimSpace(c.Request.RemoteAddr)) + if err != nil { + return nil, false + } + remoteIP := net.ParseIP(ip) + if remoteIP == nil { + return nil, false + } + + if c.engine.trustedCIDRs != nil { + for _, cidr := range c.engine.trustedCIDRs { + if cidr.Contains(remoteIP) { + return remoteIP, true + } + } + } + + return remoteIP, false +} + +func validateHeader(header string) (clientIP string, valid bool) { + if header == "" { + return "", false + } + items := strings.Split(header, ",") + for i, ipStr := range items { + ipStr = strings.TrimSpace(ipStr) + ip := net.ParseIP(ipStr) + if ip == nil { + return "", false + } + + // We need to return the first IP in the list, but, + // we should not early return since we need to validate that + // the rest of the header is syntactically valid + if i == 0 { + clientIP = ipStr + valid = true + } + } + return } // ContentType returns the Content-Type header of the request. @@ -497,70 +810,68 @@ func (c *Context) ContentType() string { // handshake is being initiated by the client. 
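ShouldBindBodyWith above stashes the raw body under BodyBytesKey so a second binding does not hit an already-drained reader. A hedged sketch that tries JSON first and XML second, assuming the binding package is imported:

type payload struct {
	Name string `json:"name" xml:"name"`
}

func flexible(c *gin.Context) {
	var p payload
	// The first call reads the body and caches it on the context...
	if err := c.ShouldBindBodyWith(&p, binding.JSON); err == nil {
		c.JSON(http.StatusOK, p)
		return
	}
	// ...so this second call can reuse the cached bytes.
	if err := c.ShouldBindBodyWith(&p, binding.XML); err == nil {
		c.JSON(http.StatusOK, p)
		return
	}
	c.Status(http.StatusBadRequest)
}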
func (c *Context) IsWebsocket() bool { if strings.Contains(strings.ToLower(c.requestHeader("Connection")), "upgrade") && - strings.ToLower(c.requestHeader("Upgrade")) == "websocket" { + strings.EqualFold(c.requestHeader("Upgrade"), "websocket") { return true } return false } func (c *Context) requestHeader(key string) string { - if values, _ := c.Request.Header[key]; len(values) > 0 { - return values[0] - } - return "" + return c.Request.Header.Get(key) } /************************************/ /******** RESPONSE RENDERING ********/ /************************************/ -// bodyAllowedForStatus is a copy of http.bodyAllowedForStatus non-exported function +// bodyAllowedForStatus is a copy of http.bodyAllowedForStatus non-exported function. func bodyAllowedForStatus(status int) bool { switch { case status >= 100 && status <= 199: return false - case status == 204: + case status == http.StatusNoContent: return false - case status == 304: + case status == http.StatusNotModified: return false } return true } +// Status sets the HTTP response code. func (c *Context) Status(code int) { - c.writermem.WriteHeader(code) + c.Writer.WriteHeader(code) } -// Header is a intelligent shortcut for c.Writer.Header().Set(key, value) +// Header is a intelligent shortcut for c.Writer.Header().Set(key, value). // It writes a header in the response. // If value == "", this method removes the header `c.Writer.Header().Del(key)` func (c *Context) Header(key, value string) { - if len(value) == 0 { + if value == "" { c.Writer.Header().Del(key) - } else { - c.Writer.Header().Set(key, value) + return } + c.Writer.Header().Set(key, value) } -// GetHeader returns value from request headers +// GetHeader returns value from request headers. func (c *Context) GetHeader(key string) string { return c.requestHeader(key) } -// GetRawData return stream data +// GetRawData return stream data. func (c *Context) GetRawData() ([]byte, error) { return ioutil.ReadAll(c.Request.Body) } -func (c *Context) SetCookie( - name string, - value string, - maxAge int, - path string, - domain string, - secure bool, - httpOnly bool, -) { +// SetSameSite with cookie +func (c *Context) SetSameSite(samesite http.SameSite) { + c.sameSite = samesite +} + +// SetCookie adds a Set-Cookie header to the ResponseWriter's headers. +// The provided cookie must have a valid Name. Invalid cookies may be +// silently dropped. +func (c *Context) SetCookie(name, value string, maxAge int, path, domain string, secure, httpOnly bool) { if path == "" { path = "/" } @@ -570,11 +881,16 @@ func (c *Context) SetCookie( MaxAge: maxAge, Path: path, Domain: domain, + SameSite: c.sameSite, Secure: secure, HttpOnly: httpOnly, }) } +// Cookie returns the named cookie provided in the request or +// ErrNoCookie if not found. And return the named cookie is unescaped. +// If multiple cookies match the given name, only one cookie will +// be returned. func (c *Context) Cookie(name string) (string, error) { cookie, err := c.Request.Cookie(name) if err != nil { @@ -584,6 +900,7 @@ func (c *Context) Cookie(name string) (string, error) { return val, nil } +// Render writes the response headers and calls render.Render to render data. func (c *Context) Render(code int, r render.Render) { c.Status(code) @@ -614,12 +931,43 @@ func (c *Context) IndentedJSON(code int, obj interface{}) { c.Render(code, render.IndentedJSON{Data: obj}) } +// SecureJSON serializes the given struct as Secure JSON into the response body. 
+// Default prepends "while(1)," to response body if the given struct is array values. +// It also sets the Content-Type as "application/json". +func (c *Context) SecureJSON(code int, obj interface{}) { + c.Render(code, render.SecureJSON{Prefix: c.engine.secureJSONPrefix, Data: obj}) +} + +// JSONP serializes the given struct as JSON into the response body. +// It adds padding to response body to request data from a server residing in a different domain than the client. +// It also sets the Content-Type as "application/javascript". +func (c *Context) JSONP(code int, obj interface{}) { + callback := c.DefaultQuery("callback", "") + if callback == "" { + c.Render(code, render.JSON{Data: obj}) + return + } + c.Render(code, render.JsonpJSON{Callback: callback, Data: obj}) +} + // JSON serializes the given struct as JSON into the response body. // It also sets the Content-Type as "application/json". func (c *Context) JSON(code int, obj interface{}) { c.Render(code, render.JSON{Data: obj}) } +// AsciiJSON serializes the given struct as JSON into the response body with unicode to ASCII string. +// It also sets the Content-Type as "application/json". +func (c *Context) AsciiJSON(code int, obj interface{}) { + c.Render(code, render.AsciiJSON{Data: obj}) +} + +// PureJSON serializes the given struct as JSON into the response body. +// PureJSON, unlike JSON, does not replace special html characters with their unicode entities. +func (c *Context) PureJSON(code int, obj interface{}) { + c.Render(code, render.PureJSON{Data: obj}) +} + // XML serializes the given struct as XML into the response body. // It also sets the Content-Type as "application/xml". func (c *Context) XML(code int, obj interface{}) { @@ -631,6 +979,11 @@ func (c *Context) YAML(code int, obj interface{}) { c.Render(code, render.YAML{Data: obj}) } +// ProtoBuf serializes the given struct as ProtoBuf into the response body. +func (c *Context) ProtoBuf(code int, obj interface{}) { + c.Render(code, render.ProtoBuf{Data: obj}) +} + // String writes the given string into the response body. func (c *Context) String(code int, format string, values ...interface{}) { c.Render(code, render.String{Format: format, Data: values}) @@ -653,11 +1006,39 @@ func (c *Context) Data(code int, contentType string, data []byte) { }) } -// File writes the specified file into the body stream in a efficient way. +// DataFromReader writes the specified reader into the body stream and updates the HTTP code. +func (c *Context) DataFromReader(code int, contentLength int64, contentType string, reader io.Reader, extraHeaders map[string]string) { + c.Render(code, render.Reader{ + Headers: extraHeaders, + ContentType: contentType, + ContentLength: contentLength, + Reader: reader, + }) +} + +// File writes the specified file into the body stream in an efficient way. func (c *Context) File(filepath string) { http.ServeFile(c.Writer, c.Request, filepath) } +// FileFromFS writes the specified file from http.FileSystem into the body stream in an efficient way. 
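A quick sketch contrasting the render helpers added above; the routes and payloads are illustrative:

func renders(r *gin.Engine) {
	names := []string{"lena", "austin", "foo"}

	// Prepends the engine's secure JSON prefix when the payload is an array.
	r.GET("/secure", func(c *gin.Context) { c.SecureJSON(http.StatusOK, names) })

	// Leaves "<b>" as-is instead of escaping it to \u003cb\u003e.
	r.GET("/pure", func(c *gin.Context) { c.PureJSON(http.StatusOK, gin.H{"html": "<b>Hello</b>"}) })

	// GET /jsonp?callback=cb wraps the body as cb({...}).
	r.GET("/jsonp", func(c *gin.Context) { c.JSONP(http.StatusOK, gin.H{"foo": "bar"}) })
}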
+func (c *Context) FileFromFS(filepath string, fs http.FileSystem) { + defer func(old string) { + c.Request.URL.Path = old + }(c.Request.URL.Path) + + c.Request.URL.Path = filepath + + http.FileServer(fs).ServeHTTP(c.Writer, c.Request) +} + +// FileAttachment writes the specified file into the body stream in an efficient way +// On the client side, the file will typically be downloaded with the given filename +func (c *Context) FileAttachment(filepath, filename string) { + c.Writer.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename)) + http.ServeFile(c.Writer, c.Request, filepath) +} + // SSEvent writes a Server-Sent Event into the body stream. func (c *Context) SSEvent(name string, message interface{}) { c.Render(-1, sse.Event{ @@ -666,18 +1047,20 @@ func (c *Context) SSEvent(name string, message interface{}) { }) } -func (c *Context) Stream(step func(w io.Writer) bool) { +// Stream sends a streaming response and returns a boolean +// indicates "Is client disconnected in middle of stream" +func (c *Context) Stream(step func(w io.Writer) bool) bool { w := c.Writer clientGone := w.CloseNotify() for { select { case <-clientGone: - return + return true default: keepOpen := step(w) w.Flush() if !keepOpen { - return + return false } } } @@ -687,15 +1070,18 @@ func (c *Context) Stream(step func(w io.Writer) bool) { /******** CONTENT NEGOTIATION *******/ /************************************/ +// Negotiate contains all negotiations data. type Negotiate struct { Offered []string HTMLName string HTMLData interface{} JSONData interface{} XMLData interface{} + YAMLData interface{} Data interface{} } +// Negotiate calls different Render according acceptable Accept format. func (c *Context) Negotiate(code int, config Negotiate) { switch c.NegotiateFormat(config.Offered...) { case binding.MIMEJSON: @@ -710,11 +1096,16 @@ func (c *Context) Negotiate(code int, config Negotiate) { data := chooseData(config.XMLData, config.Data) c.XML(code, data) + case binding.MIMEYAML: + data := chooseData(config.YAMLData, config.Data) + c.YAML(code, data) + default: - c.AbortWithError(http.StatusNotAcceptable, errors.New("the accepted formats are not offered by the server")) + c.AbortWithError(http.StatusNotAcceptable, errors.New("the accepted formats are not offered by the server")) // nolint: errcheck } } +// NegotiateFormat returns an acceptable Accept format. func (c *Context) NegotiateFormat(offered ...string) string { assert1(len(offered) > 0, "you must provide at least one offer") @@ -725,15 +1116,27 @@ func (c *Context) NegotiateFormat(offered ...string) string { return offered[0] } for _, accepted := range c.Accepted { - for _, offert := range offered { - if accepted == offert { - return offert + for _, offer := range offered { + // According to RFC 2616 and RFC 2396, non-ASCII characters are not allowed in headers, + // therefore we can just iterate over the string without casting it into []rune + i := 0 + for ; i < len(accepted); i++ { + if accepted[i] == '*' || offer[i] == '*' { + return offer + } + if accepted[i] != offer[i] { + break + } + } + if i == len(accepted) { + return offer } } } return "" } +// SetAccepted sets Accept header data. 
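With MIMEYAML now wired into Negotiate above, one handler can serve JSON, XML or YAML depending on the Accept header. A minimal sketch:

func negotiated(c *gin.Context) {
	c.Negotiate(http.StatusOK, gin.Negotiate{
		// The first offer matching the Accept header wins; anything else
		// ends in 406 via AbortWithError.
		Offered: []string{gin.MIMEJSON, gin.MIMEXML, gin.MIMEYAML},
		Data:    gin.H{"status": "ok"},
	})
}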
func (c *Context) SetAccepted(formats ...string) { c.Accepted = formats } @@ -742,18 +1145,27 @@ func (c *Context) SetAccepted(formats ...string) { /***** GOLANG.ORG/X/NET/CONTEXT *****/ /************************************/ +// Deadline always returns that there is no deadline (ok==false), +// maybe you want to use Request.Context().Deadline() instead. func (c *Context) Deadline() (deadline time.Time, ok bool) { return } +// Done always returns nil (chan which will wait forever), +// if you want to abort your work when the connection was closed +// you should use Request.Context().Done() instead. func (c *Context) Done() <-chan struct{} { return nil } +// Err always returns nil, maybe you want to use Request.Context().Err() instead. func (c *Context) Err() error { return nil } +// Value returns the value associated with this context for key, or nil +// if no value is associated with key. Successive calls to Value with +// the same key returns the same result. func (c *Context) Value(key interface{}) interface{} { if key == 0 { return c.Request diff --git a/vendor/github.com/gin-gonic/gin/context_appengine.go b/vendor/github.com/gin-gonic/gin/context_appengine.go index 38c189a0b..d56584348 100644 --- a/vendor/github.com/gin-gonic/gin/context_appengine.go +++ b/vendor/github.com/gin-gonic/gin/context_appengine.go @@ -1,9 +1,10 @@ -// +build appengine - // Copyright 2017 Manu Martinez-Almeida. All rights reserved. // Use of this source code is governed by a MIT style // license that can be found in the LICENSE file. +//go:build appengine +// +build appengine + package gin func init() { diff --git a/vendor/github.com/gin-gonic/gin/debug.go b/vendor/github.com/gin-gonic/gin/debug.go index a121591a8..4c7cd0c39 100644 --- a/vendor/github.com/gin-gonic/gin/debug.go +++ b/vendor/github.com/gin-gonic/gin/debug.go @@ -5,32 +5,39 @@ package gin import ( - "bytes" + "fmt" "html/template" - "log" + "runtime" + "strconv" + "strings" ) -func init() { - log.SetFlags(0) -} +const ginSupportMinGoVer = 12 // IsDebugging returns true if the framework is running in debug mode. -// Use SetMode(gin.Release) to switch to disable the debug mode. +// Use SetMode(gin.ReleaseMode) to disable debug mode. func IsDebugging() bool { return ginMode == debugCode } +// DebugPrintRouteFunc indicates debug log output format. +var DebugPrintRouteFunc func(httpMethod, absolutePath, handlerName string, nuHandlers int) + func debugPrintRoute(httpMethod, absolutePath string, handlers HandlersChain) { if IsDebugging() { nuHandlers := len(handlers) handlerName := nameOfFunction(handlers.Last()) - debugPrint("%-6s %-25s --> %s (%d handlers)\n", httpMethod, absolutePath, handlerName, nuHandlers) + if DebugPrintRouteFunc == nil { + debugPrint("%-6s %-25s --> %s (%d handlers)\n", httpMethod, absolutePath, handlerName, nuHandlers) + } else { + DebugPrintRouteFunc(httpMethod, absolutePath, handlerName, nuHandlers) + } } } func debugPrintLoadTemplate(tmpl *template.Template) { if IsDebugging() { - var buf bytes.Buffer + var buf strings.Builder for _, tmpl := range tmpl.Templates() { buf.WriteString("\t- ") buf.WriteString(tmpl.Name()) @@ -42,10 +49,33 @@ func debugPrintLoadTemplate(tmpl *template.Template) { func debugPrint(format string, values ...interface{}) { if IsDebugging() { - log.Printf("[GIN-debug] "+format, values...) + if !strings.HasSuffix(format, "\n") { + format += "\n" + } + fmt.Fprintf(DefaultWriter, "[GIN-debug] "+format, values...) 
} } +func getMinVer(v string) (uint64, error) { + first := strings.IndexByte(v, '.') + last := strings.LastIndexByte(v, '.') + if first == last { + return strconv.ParseUint(v[first+1:], 10, 64) + } + return strconv.ParseUint(v[first+1:last], 10, 64) +} + +func debugPrintWARNINGDefault() { + if v, e := getMinVer(runtime.Version()); e == nil && v <= ginSupportMinGoVer { + debugPrint(`[WARNING] Now Gin requires Go 1.12+. + +`) + } + debugPrint(`[WARNING] Creating an Engine instance with the Logger and Recovery middleware already attached. + +`) +} + func debugPrintWARNINGNew() { debugPrint(`[WARNING] Running in "debug" mode. Switch to "release" mode in production. - using env: export GIN_MODE=release @@ -66,6 +96,8 @@ at initialization. ie. before any route is registered or the router is listening func debugPrintError(err error) { if err != nil { - debugPrint("[ERROR] %v\n", err) + if IsDebugging() { + fmt.Fprintf(DefaultErrorWriter, "[GIN-debug] [ERROR] %v\n", err) + } } } diff --git a/vendor/github.com/gin-gonic/gin/deprecated.go b/vendor/github.com/gin-gonic/gin/deprecated.go index 27e8f5583..ab4474296 100644 --- a/vendor/github.com/gin-gonic/gin/deprecated.go +++ b/vendor/github.com/gin-gonic/gin/deprecated.go @@ -5,21 +5,17 @@ package gin import ( - "github.com/gin-gonic/gin/binding" "log" -) -func (c *Context) GetCookie(name string) (string, error) { - log.Println("GetCookie() method is deprecated. Use Cookie() instead.") - return c.Cookie(name) -} + "github.com/gin-gonic/gin/binding" +) // BindWith binds the passed struct pointer using the specified binding engine. // See the binding package. func (c *Context) BindWith(obj interface{}, b binding.Binding) error { log.Println(`BindWith(\"interface{}, binding.Binding\") error is going to be deprecated, please check issue #662 and either use MustBindWith() if you - want HTTP 400 to be automatically returned if any error occur, of use + want HTTP 400 to be automatically returned if any error occur, or use ShouldBindWith() if you need to manage the error.`) return c.MustBindWith(obj, b) } diff --git a/vendor/github.com/gin-gonic/gin/doc.go b/vendor/github.com/gin-gonic/gin/doc.go new file mode 100644 index 000000000..1bd03864f --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/doc.go @@ -0,0 +1,6 @@ +/* +Package gin implements a HTTP web framework called gin. + +See https://gin-gonic.com/ for more information about gin. +*/ +package gin // import "github.com/gin-gonic/gin" diff --git a/vendor/github.com/gin-gonic/gin/errors.go b/vendor/github.com/gin-gonic/gin/errors.go index 896af6fc2..0f276c13d 100644 --- a/vendor/github.com/gin-gonic/gin/errors.go +++ b/vendor/github.com/gin-gonic/gin/errors.go @@ -5,48 +5,57 @@ package gin import ( - "bytes" - "encoding/json" "fmt" "reflect" + "strings" + + "github.com/gin-gonic/gin/internal/json" ) +// ErrorType is an unsigned 64-bit error code as defined in the gin spec. type ErrorType uint64 const ( - ErrorTypeBind ErrorType = 1 << 63 // used when c.Bind() fails - ErrorTypeRender ErrorType = 1 << 62 // used when c.Render() fails + // ErrorTypeBind is used when Context.Bind() fails. + ErrorTypeBind ErrorType = 1 << 63 + // ErrorTypeRender is used when Context.Render() fails. + ErrorTypeRender ErrorType = 1 << 62 + // ErrorTypePrivate indicates a private error. ErrorTypePrivate ErrorType = 1 << 0 - ErrorTypePublic ErrorType = 1 << 1 - + // ErrorTypePublic indicates a public error. + ErrorTypePublic ErrorType = 1 << 1 + // ErrorTypeAny indicates any other error. 
ErrorTypeAny ErrorType = 1<<64 - 1 - ErrorTypeNu = 2 + // ErrorTypeNu indicates any other error. + ErrorTypeNu = 2 ) -type ( - Error struct { - Err error - Type ErrorType - Meta interface{} - } +// Error represents a error's specification. +type Error struct { + Err error + Type ErrorType + Meta interface{} +} - errorMsgs []*Error -) +type errorMsgs []*Error var _ error = &Error{} +// SetType sets the error's type. func (msg *Error) SetType(flags ErrorType) *Error { msg.Type = flags return msg } +// SetMeta sets the error's meta data. func (msg *Error) SetMeta(data interface{}) *Error { msg.Meta = data return msg } +// JSON creates a properly formatted JSON func (msg *Error) JSON() interface{} { - json := H{} + jsonData := H{} if msg.Meta != nil { value := reflect.ValueOf(msg.Meta) switch value.Kind() { @@ -54,34 +63,40 @@ func (msg *Error) JSON() interface{} { return msg.Meta case reflect.Map: for _, key := range value.MapKeys() { - json[key.String()] = value.MapIndex(key).Interface() + jsonData[key.String()] = value.MapIndex(key).Interface() } default: - json["meta"] = msg.Meta + jsonData["meta"] = msg.Meta } } - if _, ok := json["error"]; !ok { - json["error"] = msg.Error() + if _, ok := jsonData["error"]; !ok { + jsonData["error"] = msg.Error() } - return json + return jsonData } -// MarshalJSON implements the json.Marshaller interface +// MarshalJSON implements the json.Marshaller interface. func (msg *Error) MarshalJSON() ([]byte, error) { return json.Marshal(msg.JSON()) } -// Implements the error interface +// Error implements the error interface. func (msg Error) Error() string { return msg.Err.Error() } +// IsType judges one error. func (msg *Error) IsType(flags ErrorType) bool { return (msg.Type & flags) > 0 } -// Returns a readonly copy filtered the byte. -// ie ByType(gin.ErrorTypePublic) returns a slice of errors with type=ErrorTypePublic +// Unwrap returns the wrapped error, to allow interoperability with errors.Is(), errors.As() and errors.Unwrap() +func (msg *Error) Unwrap() error { + return msg.Err +} + +// ByType returns a readonly copy filtered the byte. +// ie ByType(gin.ErrorTypePublic) returns a slice of errors with type=ErrorTypePublic. func (a errorMsgs) ByType(typ ErrorType) errorMsgs { if len(a) == 0 { return nil @@ -98,17 +113,16 @@ func (a errorMsgs) ByType(typ ErrorType) errorMsgs { return result } -// Returns the last error in the slice. It returns nil if the array is empty. -// Shortcut for errors[len(errors)-1] +// Last returns the last error in the slice. It returns nil if the array is empty. +// Shortcut for errors[len(errors)-1]. func (a errorMsgs) Last() *Error { - length := len(a) - if length > 0 { + if length := len(a); length > 0 { return a[length-1] } return nil } -// Returns an array will all the error messages. +// Errors returns an array will all the error messages. // Example: // c.Error(errors.New("first")) // c.Error(errors.New("second")) @@ -126,20 +140,21 @@ func (a errorMsgs) Errors() []string { } func (a errorMsgs) JSON() interface{} { - switch len(a) { + switch length := len(a); length { case 0: return nil case 1: return a.Last().JSON() default: - json := make([]interface{}, len(a)) + jsonData := make([]interface{}, length) for i, err := range a { - json[i] = err.JSON() + jsonData[i] = err.JSON() } - return json + return jsonData } } +// MarshalJSON implements the json.Marshaller interface. 
func (a errorMsgs) MarshalJSON() ([]byte, error) { return json.Marshal(a.JSON()) } @@ -148,9 +163,9 @@ func (a errorMsgs) String() string { if len(a) == 0 { return "" } - var buffer bytes.Buffer + var buffer strings.Builder for i, msg := range a { - fmt.Fprintf(&buffer, "Error #%02d: %s\n", (i + 1), msg.Err) + fmt.Fprintf(&buffer, "Error #%02d: %s\n", i+1, msg.Err) if msg.Meta != nil { fmt.Fprintf(&buffer, " Meta: %v\n", msg.Meta) } diff --git a/vendor/github.com/gin-gonic/gin/fs.go b/vendor/github.com/gin-gonic/gin/fs.go index 12645826a..007d9b755 100644 --- a/vendor/github.com/gin-gonic/gin/fs.go +++ b/vendor/github.com/gin-gonic/gin/fs.go @@ -9,14 +9,13 @@ import ( "os" ) -type ( - onlyfilesFS struct { - fs http.FileSystem - } - neuteredReaddirFile struct { - http.File - } -) +type onlyFilesFS struct { + fs http.FileSystem +} + +type neuteredReaddirFile struct { + http.File +} // Dir returns a http.Filesystem that can be used by http.FileServer(). It is used internally // in router.Static(). @@ -27,11 +26,11 @@ func Dir(root string, listDirectory bool) http.FileSystem { if listDirectory { return fs } - return &onlyfilesFS{fs} + return &onlyFilesFS{fs} } -// Conforms to http.Filesystem -func (fs onlyfilesFS) Open(name string) (http.File, error) { +// Open conforms to http.Filesystem. +func (fs onlyFilesFS) Open(name string) (http.File, error) { f, err := fs.fs.Open(name) if err != nil { return nil, err @@ -39,7 +38,7 @@ func (fs onlyfilesFS) Open(name string) (http.File, error) { return neuteredReaddirFile{f}, nil } -// Overrides the http.File default implementation +// Readdir overrides the http.File default implementation. func (f neuteredReaddirFile) Readdir(count int) ([]os.FileInfo, error) { // this disables directory listing return nil, nil diff --git a/vendor/github.com/gin-gonic/gin/gin.go b/vendor/github.com/gin-gonic/gin/gin.go index c4118a4ef..03a0e127e 100644 --- a/vendor/github.com/gin-gonic/gin/gin.go +++ b/vendor/github.com/gin-gonic/gin/gin.go @@ -5,95 +5,135 @@ package gin import ( + "fmt" "html/template" "net" "net/http" "os" + "path" + "strings" "sync" + "github.com/gin-gonic/gin/internal/bytesconv" "github.com/gin-gonic/gin/render" ) -// Version is Framework's version -const Version = "v1.2" +const defaultMultipartMemory = 32 << 20 // 32 MB + +var ( + default404Body = []byte("404 page not found") + default405Body = []byte("405 method not allowed") +) -var default404Body = []byte("404 page not found") -var default405Body = []byte("405 method not allowed") var defaultAppEngine bool +// HandlerFunc defines the handler used by gin middleware as return value. type HandlerFunc func(*Context) + +// HandlersChain defines a HandlerFunc array. type HandlersChain []HandlerFunc -// Last returns the last handler in the chain. ie. the last handler is the main own. +// Last returns the last handler in the chain. ie. the last handler is the main one. func (c HandlersChain) Last() HandlerFunc { - length := len(c) - if length > 0 { + if length := len(c); length > 0 { return c[length-1] } return nil } -type ( - RoutesInfo []RouteInfo - RouteInfo struct { - Method string - Path string - Handler string - } +// RouteInfo represents a request route's specification which contains method and path and its handler. +type RouteInfo struct { + Method string + Path string + Handler string + HandlerFunc HandlerFunc +} - // Engine is the framework's instance, it contains the muxer, middleware and configuration settings. 
- // Create an instance of Engine, by using New() or Default() - Engine struct { - RouterGroup - delims render.Delims - HTMLRender render.HTMLRender - FuncMap template.FuncMap - allNoRoute HandlersChain - allNoMethod HandlersChain - noRoute HandlersChain - noMethod HandlersChain - pool sync.Pool - trees methodTrees - - // Enables automatic redirection if the current route can't be matched but a - // handler for the path with (without) the trailing slash exists. - // For example if /foo/ is requested but a route only exists for /foo, the - // client is redirected to /foo with http status code 301 for GET requests - // and 307 for all other request methods. - RedirectTrailingSlash bool - - // If enabled, the router tries to fix the current request path, if no - // handle is registered for it. - // First superfluous path elements like ../ or // are removed. - // Afterwards the router does a case-insensitive lookup of the cleaned path. - // If a handle can be found for this route, the router makes a redirection - // to the corrected path with status code 301 for GET requests and 307 for - // all other request methods. - // For example /FOO and /..//Foo could be redirected to /foo. - // RedirectTrailingSlash is independent of this option. - RedirectFixedPath bool - - // If enabled, the router checks if another method is allowed for the - // current route, if the current request can not be routed. - // If this is the case, the request is answered with 'Method Not Allowed' - // and HTTP status code 405. - // If no other Method is allowed, the request is delegated to the NotFound - // handler. - HandleMethodNotAllowed bool - ForwardedByClientIP bool - - // #726 #755 If enabled, it will thrust some headers starting with - // 'X-AppEngine...' for better integration with that PaaS. - AppEngine bool - - // If enabled, the url.RawPath will be used to find parameters. - UseRawPath bool - // If true, the path value will be unescaped. - // If UseRawPath is false (by default), the UnescapePathValues effectively is true, - // as url.Path gonna be used, which is already unescaped. - UnescapePathValues bool - } -) +// RoutesInfo defines a RouteInfo array. +type RoutesInfo []RouteInfo + +// Engine is the framework's instance, it contains the muxer, middleware and configuration settings. +// Create an instance of Engine, by using New() or Default() +type Engine struct { + RouterGroup + + // Enables automatic redirection if the current route can't be matched but a + // handler for the path with (without) the trailing slash exists. + // For example if /foo/ is requested but a route only exists for /foo, the + // client is redirected to /foo with http status code 301 for GET requests + // and 307 for all other request methods. + RedirectTrailingSlash bool + + // If enabled, the router tries to fix the current request path, if no + // handle is registered for it. + // First superfluous path elements like ../ or // are removed. + // Afterwards the router does a case-insensitive lookup of the cleaned path. + // If a handle can be found for this route, the router makes a redirection + // to the corrected path with status code 301 for GET requests and 307 for + // all other request methods. + // For example /FOO and /..//Foo could be redirected to /foo. + // RedirectTrailingSlash is independent of this option. + RedirectFixedPath bool + + // If enabled, the router checks if another method is allowed for the + // current route, if the current request can not be routed. 
+ // If this is the case, the request is answered with 'Method Not Allowed' + // and HTTP status code 405. + // If no other Method is allowed, the request is delegated to the NotFound + // handler. + HandleMethodNotAllowed bool + + // If enabled, client IP will be parsed from the request's headers that + // match those stored at `(*gin.Engine).RemoteIPHeaders`. If no IP was + // fetched, it falls back to the IP obtained from + // `(*gin.Context).Request.RemoteAddr`. + ForwardedByClientIP bool + + // List of headers used to obtain the client IP when + // `(*gin.Engine).ForwardedByClientIP` is `true` and + // `(*gin.Context).Request.RemoteAddr` is matched by at least one of the + // network origins of `(*gin.Engine).TrustedProxies`. + RemoteIPHeaders []string + + // List of network origins (IPv4 addresses, IPv4 CIDRs, IPv6 addresses or + // IPv6 CIDRs) from which to trust request's headers that contain + // alternative client IP when `(*gin.Engine).ForwardedByClientIP` is + // `true`. + TrustedProxies []string + + // #726 #755 If enabled, it will trust some headers starting with + // 'X-AppEngine...' for better integration with that PaaS. + AppEngine bool + + // If enabled, the url.RawPath will be used to find parameters. + UseRawPath bool + + // If true, the path value will be unescaped. + // If UseRawPath is false (by default), the UnescapePathValues effectively is true, + // as url.Path gonna be used, which is already unescaped. + UnescapePathValues bool + + // Value of 'maxMemory' param that is given to http.Request's ParseMultipartForm + // method call. + MaxMultipartMemory int64 + + // RemoveExtraSlash a parameter can be parsed from the URL even with extra slashes. + // See the PR #1817 and issue #1644 + RemoveExtraSlash bool + + delims render.Delims + secureJSONPrefix string + HTMLRender render.HTMLRender + FuncMap template.FuncMap + allNoRoute HandlersChain + allNoMethod HandlersChain + noRoute HandlersChain + noMethod HandlersChain + pool sync.Pool + trees methodTrees + maxParams uint16 + trustedCIDRs []*net.IPNet +} var _ IRouter = &Engine{} @@ -118,11 +158,16 @@ func New() *Engine { RedirectFixedPath: false, HandleMethodNotAllowed: false, ForwardedByClientIP: true, + RemoteIPHeaders: []string{"X-Forwarded-For", "X-Real-IP"}, + TrustedProxies: []string{"0.0.0.0/0"}, AppEngine: defaultAppEngine, UseRawPath: false, + RemoveExtraSlash: false, UnescapePathValues: true, + MaxMultipartMemory: defaultMultipartMemory, trees: make(methodTrees, 0, 9), - delims: render.Delims{"{{", "}}"}, + delims: render.Delims{Left: "{{", Right: "}}"}, + secureJSONPrefix: "while(1);", } engine.RouterGroup.engine = engine engine.pool.New = func() interface{} { @@ -133,39 +178,58 @@ func New() *Engine { // Default returns an Engine instance with the Logger and Recovery middleware already attached. func Default() *Engine { + debugPrintWARNINGDefault() engine := New() engine.Use(Logger(), Recovery()) return engine } func (engine *Engine) allocateContext() *Context { - return &Context{engine: engine} + v := make(Params, 0, engine.maxParams) + return &Context{engine: engine, params: &v} } +// Delims sets template left and right delims and returns a Engine instance. func (engine *Engine) Delims(left, right string) *Engine { - engine.delims = render.Delims{left, right} + engine.delims = render.Delims{Left: left, Right: right} + return engine +} + +// SecureJsonPrefix sets the secureJSONPrefix used in Context.SecureJSON. 
+func (engine *Engine) SecureJsonPrefix(prefix string) *Engine { + engine.secureJSONPrefix = prefix return engine } +// LoadHTMLGlob loads HTML files identified by glob pattern +// and associates the result with HTML renderer. func (engine *Engine) LoadHTMLGlob(pattern string) { + left := engine.delims.Left + right := engine.delims.Right + templ := template.Must(template.New("").Delims(left, right).Funcs(engine.FuncMap).ParseGlob(pattern)) + if IsDebugging() { - debugPrintLoadTemplate(template.Must(template.New("").Delims(engine.delims.Left, engine.delims.Right).Funcs(engine.FuncMap).ParseGlob(pattern))) + debugPrintLoadTemplate(templ) engine.HTMLRender = render.HTMLDebug{Glob: pattern, FuncMap: engine.FuncMap, Delims: engine.delims} - } else { - templ := template.Must(template.New("").Delims(engine.delims.Left, engine.delims.Right).Funcs(engine.FuncMap).ParseGlob(pattern)) - engine.SetHTMLTemplate(templ) + return } + + engine.SetHTMLTemplate(templ) } +// LoadHTMLFiles loads a slice of HTML files +// and associates the result with HTML renderer. func (engine *Engine) LoadHTMLFiles(files ...string) { if IsDebugging() { engine.HTMLRender = render.HTMLDebug{Files: files, FuncMap: engine.FuncMap, Delims: engine.delims} - } else { - templ := template.Must(template.New("").Delims(engine.delims.Left, engine.delims.Right).Funcs(engine.FuncMap).ParseFiles(files...)) - engine.SetHTMLTemplate(templ) + return } + + templ := template.Must(template.New("").Delims(engine.delims.Left, engine.delims.Right).Funcs(engine.FuncMap).ParseFiles(files...)) + engine.SetHTMLTemplate(templ) } +// SetHTMLTemplate associate a template with HTML renderer. func (engine *Engine) SetHTMLTemplate(templ *template.Template) { if len(engine.trees) > 0 { debugPrintWARNINGSetHTMLTemplate() @@ -174,6 +238,7 @@ func (engine *Engine) SetHTMLTemplate(templ *template.Template) { engine.HTMLRender = render.HTMLProduction{Template: templ.Funcs(engine.FuncMap)} } +// SetFuncMap sets the FuncMap used for template.FuncMap. func (engine *Engine) SetFuncMap(funcMap template.FuncMap) { engine.FuncMap = funcMap } @@ -184,13 +249,13 @@ func (engine *Engine) NoRoute(handlers ...HandlerFunc) { engine.rebuild404Handlers() } -// NoMethod sets the handlers called when... TODO +// NoMethod sets the handlers called when... TODO. func (engine *Engine) NoMethod(handlers ...HandlerFunc) { engine.noMethod = handlers engine.rebuild405Handlers() } -// Use attachs a global middleware to the router. ie. the middleware attached though Use() will be +// Use attaches a global middleware to the router. ie. the middleware attached though Use() will be // included in the handlers chain for every single request. Even 404, 405, static files... // For example, this is the right place for a logger or error management middleware. 
func (engine *Engine) Use(middleware ...HandlerFunc) IRoutes { @@ -210,16 +275,23 @@ func (engine *Engine) rebuild405Handlers() { func (engine *Engine) addRoute(method, path string, handlers HandlersChain) { assert1(path[0] == '/', "path must begin with '/'") - assert1(len(method) > 0, "HTTP method can not be empty") + assert1(method != "", "HTTP method can not be empty") assert1(len(handlers) > 0, "there must be at least one handler") debugPrintRoute(method, path, handlers) + root := engine.trees.get(method) if root == nil { root = new(node) + root.fullPath = "/" engine.trees = append(engine.trees, methodTree{method: method, root: root}) } root.addRoute(path, handlers) + + // Update maxParams + if paramsCount := countParams(path); paramsCount > engine.maxParams { + engine.maxParams = paramsCount + } } // Routes returns a slice of registered routes, including some useful information, such as: @@ -234,10 +306,12 @@ func (engine *Engine) Routes() (routes RoutesInfo) { func iterate(path, method string, routes RoutesInfo, root *node) RoutesInfo { path += root.path if len(root.handlers) > 0 { + handlerFunc := root.handlers.Last() routes = append(routes, RouteInfo{ - Method: method, - Path: path, - Handler: nameOfFunction(root.handlers.Last()), + Method: method, + Path: path, + Handler: nameOfFunction(handlerFunc), + HandlerFunc: handlerFunc, }) } for _, child := range root.children { @@ -252,16 +326,64 @@ func iterate(path, method string, routes RoutesInfo, root *node) RoutesInfo { func (engine *Engine) Run(addr ...string) (err error) { defer func() { debugPrintError(err) }() + trustedCIDRs, err := engine.prepareTrustedCIDRs() + if err != nil { + return err + } + engine.trustedCIDRs = trustedCIDRs address := resolveAddress(addr) debugPrint("Listening and serving HTTP on %s\n", address) err = http.ListenAndServe(address, engine) return } +func (engine *Engine) prepareTrustedCIDRs() ([]*net.IPNet, error) { + if engine.TrustedProxies == nil { + return nil, nil + } + + cidr := make([]*net.IPNet, 0, len(engine.TrustedProxies)) + for _, trustedProxy := range engine.TrustedProxies { + if !strings.Contains(trustedProxy, "/") { + ip := parseIP(trustedProxy) + if ip == nil { + return cidr, &net.ParseError{Type: "IP address", Text: trustedProxy} + } + + switch len(ip) { + case net.IPv4len: + trustedProxy += "/32" + case net.IPv6len: + trustedProxy += "/128" + } + } + _, cidrNet, err := net.ParseCIDR(trustedProxy) + if err != nil { + return cidr, err + } + cidr = append(cidr, cidrNet) + } + return cidr, nil +} + +// parseIP parse a string representation of an IP and returns a net.IP with the +// minimum byte representation or nil if input is invalid. +func parseIP(ip string) net.IP { + parsedIP := net.ParseIP(ip) + + if ipv4 := parsedIP.To4(); ipv4 != nil { + // return ip in a 4-byte representation + return ipv4 + } + + // return ip in a 16-byte representation or nil + return parsedIP +} + // RunTLS attaches the router to a http.Server and starts listening and serving HTTPS (secure) requests. // It is a shortcut for http.ListenAndServeTLS(addr, certFile, keyFile, router) // Note: this method will block the calling goroutine indefinitely unless an error happens. 
-func (engine *Engine) RunTLS(addr string, certFile string, keyFile string) (err error) { +func (engine *Engine) RunTLS(addr, certFile, keyFile string) (err error) { debugPrint("Listening and serving HTTPS on %s\n", addr) defer func() { debugPrintError(err) }() @@ -276,17 +398,44 @@ func (engine *Engine) RunUnix(file string) (err error) { debugPrint("Listening and serving HTTP on unix:/%s", file) defer func() { debugPrintError(err) }() - os.Remove(file) listener, err := net.Listen("unix", file) if err != nil { return } defer listener.Close() + defer os.Remove(file) + + err = http.Serve(listener, engine) + return +} + +// RunFd attaches the router to a http.Server and starts listening and serving HTTP requests +// through the specified file descriptor. +// Note: this method will block the calling goroutine indefinitely unless an error happens. +func (engine *Engine) RunFd(fd int) (err error) { + debugPrint("Listening and serving HTTP on fd@%d", fd) + defer func() { debugPrintError(err) }() + + f := os.NewFile(uintptr(fd), fmt.Sprintf("fd@%d", fd)) + listener, err := net.FileListener(f) + if err != nil { + return + } + defer listener.Close() + err = engine.RunListener(listener) + return +} + +// RunListener attaches the router to a http.Server and starts listening and serving HTTP requests +// through the specified net.Listener +func (engine *Engine) RunListener(listener net.Listener) (err error) { + debugPrint("Listening and serving HTTP on listener what's bind with address@%s", listener.Addr()) + defer func() { debugPrintError(err) }() err = http.Serve(listener, engine) return } -// Conforms to the http.Handler interface. +// ServeHTTP conforms to the http.Handler interface. func (engine *Engine) ServeHTTP(w http.ResponseWriter, req *http.Request) { c := engine.pool.Get().(*Context) c.writermem.reset(w) @@ -298,68 +447,75 @@ func (engine *Engine) ServeHTTP(w http.ResponseWriter, req *http.Request) { engine.pool.Put(c) } -// Re-enter a context that has been rewritten. -// This can be done by setting c.Request.Path to your new target. +// HandleContext re-enter a context that has been rewritten. +// This can be done by setting c.Request.URL.Path to your new target. // Disclaimer: You can loop yourself to death with this, use wisely. 
func (engine *Engine) HandleContext(c *Context) { + oldIndexValue := c.index c.reset() engine.handleHTTPRequest(c) - engine.pool.Put(c) + + c.index = oldIndexValue } -func (engine *Engine) handleHTTPRequest(context *Context) { - httpMethod := context.Request.Method - var path string - var unescape bool - if engine.UseRawPath && len(context.Request.URL.RawPath) > 0 { - path = context.Request.URL.RawPath +func (engine *Engine) handleHTTPRequest(c *Context) { + httpMethod := c.Request.Method + rPath := c.Request.URL.Path + unescape := false + if engine.UseRawPath && len(c.Request.URL.RawPath) > 0 { + rPath = c.Request.URL.RawPath unescape = engine.UnescapePathValues - } else { - path = context.Request.URL.Path - unescape = false + } + + if engine.RemoveExtraSlash { + rPath = cleanPath(rPath) } // Find root of the tree for the given HTTP method t := engine.trees for i, tl := 0, len(t); i < tl; i++ { - if t[i].method == httpMethod { - root := t[i].root - // Find route in tree - handlers, params, tsr := root.getValue(path, context.Params, unescape) - if handlers != nil { - context.handlers = handlers - context.Params = params - context.Next() - context.writermem.WriteHeaderNow() + if t[i].method != httpMethod { + continue + } + root := t[i].root + // Find route in tree + value := root.getValue(rPath, c.params, unescape) + if value.params != nil { + c.Params = *value.params + } + if value.handlers != nil { + c.handlers = value.handlers + c.fullPath = value.fullPath + c.Next() + c.writermem.WriteHeaderNow() + return + } + if httpMethod != "CONNECT" && rPath != "/" { + if value.tsr && engine.RedirectTrailingSlash { + redirectTrailingSlash(c) return } - if httpMethod != "CONNECT" && path != "/" { - if tsr && engine.RedirectTrailingSlash { - redirectTrailingSlash(context) - return - } - if engine.RedirectFixedPath && redirectFixedPath(context, root, engine.RedirectFixedPath) { - return - } + if engine.RedirectFixedPath && redirectFixedPath(c, root, engine.RedirectFixedPath) { + return } - break } + break } - // TODO: unit test if engine.HandleMethodNotAllowed { for _, tree := range engine.trees { - if tree.method != httpMethod { - if handlers, _, _ := tree.root.getValue(path, nil, unescape); handlers != nil { - context.handlers = engine.allNoMethod - serveError(context, 405, default405Body) - return - } + if tree.method == httpMethod { + continue + } + if value := tree.root.getValue(rPath, nil, unescape); value.handlers != nil { + c.handlers = engine.allNoMethod + serveError(c, http.StatusMethodNotAllowed, default405Body) + return } } } - context.handlers = engine.allNoRoute - serveError(context, 404, default404Body) + c.handlers = engine.allNoRoute + serveError(c, http.StatusNotFound, default404Body) } var mimePlain = []string{MIMEPlain} @@ -367,52 +523,55 @@ var mimePlain = []string{MIMEPlain} func serveError(c *Context, code int, defaultMessage []byte) { c.writermem.status = code c.Next() - if !c.writermem.Written() { - if c.writermem.Status() == code { - c.writermem.Header()["Content-Type"] = mimePlain - c.Writer.Write(defaultMessage) - } else { - c.writermem.WriteHeaderNow() + if c.writermem.Written() { + return + } + if c.writermem.Status() == code { + c.writermem.Header()["Content-Type"] = mimePlain + _, err := c.Writer.Write(defaultMessage) + if err != nil { + debugPrint("cannot write message to writer during serve error: %v", err) } + return } + c.writermem.WriteHeaderNow() } func redirectTrailingSlash(c *Context) { req := c.Request - path := req.URL.Path - code := 301 // Permanent redirect, 
request with GET method - if req.Method != "GET" { - code = 307 + p := req.URL.Path + if prefix := path.Clean(c.Request.Header.Get("X-Forwarded-Prefix")); prefix != "." { + p = prefix + "/" + req.URL.Path } - - if len(path) > 1 && path[len(path)-1] == '/' { - req.URL.Path = path[:len(path)-1] - } else { - req.URL.Path = path + "/" + req.URL.Path = p + "/" + if length := len(p); length > 1 && p[length-1] == '/' { + req.URL.Path = p[:length-1] } - debugPrint("redirecting request %d: %s --> %s", code, path, req.URL.String()) - http.Redirect(c.Writer, req, req.URL.String(), code) - c.writermem.WriteHeaderNow() + redirectRequest(c) } func redirectFixedPath(c *Context, root *node, trailingSlash bool) bool { req := c.Request - path := req.URL.Path - - fixedPath, found := root.findCaseInsensitivePath( - cleanPath(path), - trailingSlash, - ) - if found { - code := 301 // Permanent redirect, request with GET method - if req.Method != "GET" { - code = 307 - } - req.URL.Path = string(fixedPath) - debugPrint("redirecting request %d: %s --> %s", code, path, req.URL.String()) - http.Redirect(c.Writer, req, req.URL.String(), code) - c.writermem.WriteHeaderNow() + rPath := req.URL.Path + + if fixedPath, ok := root.findCaseInsensitivePath(cleanPath(rPath), trailingSlash); ok { + req.URL.Path = bytesconv.BytesToString(fixedPath) + redirectRequest(c) return true } return false } + +func redirectRequest(c *Context) { + req := c.Request + rPath := req.URL.Path + rURL := req.URL.String() + + code := http.StatusMovedPermanently // Permanent redirect, request with GET method + if req.Method != http.MethodGet { + code = http.StatusTemporaryRedirect + } + debugPrint("redirecting request %d: %s --> %s", code, rPath, rURL) + http.Redirect(c.Writer, req, rURL, code) + c.writermem.WriteHeaderNow() +} diff --git a/vendor/github.com/gin-gonic/gin/go.mod b/vendor/github.com/gin-gonic/gin/go.mod new file mode 100644 index 000000000..884ff8517 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/go.mod @@ -0,0 +1,14 @@ +module github.com/gin-gonic/gin + +go 1.13 + +require ( + github.com/gin-contrib/sse v0.1.0 + github.com/go-playground/validator/v10 v10.4.1 + github.com/golang/protobuf v1.3.3 + github.com/json-iterator/go v1.1.9 + github.com/mattn/go-isatty v0.0.12 + github.com/stretchr/testify v1.4.0 + github.com/ugorji/go/codec v1.1.7 + gopkg.in/yaml.v2 v2.2.8 +) diff --git a/vendor/github.com/gin-gonic/gin/go.sum b/vendor/github.com/gin-gonic/gin/go.sum new file mode 100644 index 000000000..a64b33192 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/go.sum @@ -0,0 +1,52 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= +github.com/go-playground/universal-translator 
v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE= +github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= +github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/gin-gonic/gin/internal/bytesconv/bytesconv.go b/vendor/github.com/gin-gonic/gin/internal/bytesconv/bytesconv.go new file mode 100644 index 000000000..86e4c4d44 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/internal/bytesconv/bytesconv.go @@ -0,0 +1,24 @@ +// Copyright 2020 Gin Core Team. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package bytesconv + +import ( + "unsafe" +) + +// StringToBytes converts string to byte slice without a memory allocation. +func StringToBytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer( + &struct { + string + Cap int + }{s, len(s)}, + )) +} + +// BytesToString converts byte slice to string without a memory allocation. +func BytesToString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} diff --git a/vendor/github.com/gin-gonic/gin/internal/json/json.go b/vendor/github.com/gin-gonic/gin/internal/json/json.go new file mode 100644 index 000000000..172aeb241 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/internal/json/json.go @@ -0,0 +1,23 @@ +// Copyright 2017 Bo-Yi Wu. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +//go:build !jsoniter +// +build !jsoniter + +package json + +import "encoding/json" + +var ( + // Marshal is exported by gin/json package. + Marshal = json.Marshal + // Unmarshal is exported by gin/json package. + Unmarshal = json.Unmarshal + // MarshalIndent is exported by gin/json package. + MarshalIndent = json.MarshalIndent + // NewDecoder is exported by gin/json package. + NewDecoder = json.NewDecoder + // NewEncoder is exported by gin/json package. + NewEncoder = json.NewEncoder +) diff --git a/vendor/github.com/gin-gonic/gin/internal/json/jsoniter.go b/vendor/github.com/gin-gonic/gin/internal/json/jsoniter.go new file mode 100644 index 000000000..232f8dcad --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/internal/json/jsoniter.go @@ -0,0 +1,24 @@ +// Copyright 2017 Bo-Yi Wu. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +//go:build jsoniter +// +build jsoniter + +package json + +import jsoniter "github.com/json-iterator/go" + +var ( + json = jsoniter.ConfigCompatibleWithStandardLibrary + // Marshal is exported by gin/json package. + Marshal = json.Marshal + // Unmarshal is exported by gin/json package. + Unmarshal = json.Unmarshal + // MarshalIndent is exported by gin/json package. + MarshalIndent = json.MarshalIndent + // NewDecoder is exported by gin/json package. + NewDecoder = json.NewDecoder + // NewEncoder is exported by gin/json package. 
+ NewEncoder = json.NewEncoder +) diff --git a/vendor/github.com/gin-gonic/gin/logger.go b/vendor/github.com/gin-gonic/gin/logger.go index dc6f14154..d361b74d3 100644 --- a/vendor/github.com/gin-gonic/gin/logger.go +++ b/vendor/github.com/gin-gonic/gin/logger.go @@ -7,32 +7,167 @@ package gin import ( "fmt" "io" + "net/http" "os" "time" "github.com/mattn/go-isatty" ) -var ( - green = string([]byte{27, 91, 57, 55, 59, 52, 50, 109}) - white = string([]byte{27, 91, 57, 48, 59, 52, 55, 109}) - yellow = string([]byte{27, 91, 57, 55, 59, 52, 51, 109}) - red = string([]byte{27, 91, 57, 55, 59, 52, 49, 109}) - blue = string([]byte{27, 91, 57, 55, 59, 52, 52, 109}) - magenta = string([]byte{27, 91, 57, 55, 59, 52, 53, 109}) - cyan = string([]byte{27, 91, 57, 55, 59, 52, 54, 109}) - reset = string([]byte{27, 91, 48, 109}) - disableColor = false +type consoleColorModeValue int + +const ( + autoColor consoleColorModeValue = iota + disableColor + forceColor +) + +const ( + green = "\033[97;42m" + white = "\033[90;47m" + yellow = "\033[90;43m" + red = "\033[97;41m" + blue = "\033[97;44m" + magenta = "\033[97;45m" + cyan = "\033[97;46m" + reset = "\033[0m" ) +var consoleColorMode = autoColor + +// LoggerConfig defines the config for Logger middleware. +type LoggerConfig struct { + // Optional. Default value is gin.defaultLogFormatter + Formatter LogFormatter + + // Output is a writer where logs are written. + // Optional. Default value is gin.DefaultWriter. + Output io.Writer + + // SkipPaths is a url path array which logs are not written. + // Optional. + SkipPaths []string +} + +// LogFormatter gives the signature of the formatter function passed to LoggerWithFormatter +type LogFormatter func(params LogFormatterParams) string + +// LogFormatterParams is the structure any formatter will be handed when time to log comes +type LogFormatterParams struct { + Request *http.Request + + // TimeStamp shows the time after the server returns a response. + TimeStamp time.Time + // StatusCode is HTTP response code. + StatusCode int + // Latency is how much time the server cost to process a certain request. + Latency time.Duration + // ClientIP equals Context's ClientIP method. + ClientIP string + // Method is the HTTP method given to the request. + Method string + // Path is a path the client requests. + Path string + // ErrorMessage is set if error has occurred in processing the request. + ErrorMessage string + // isTerm shows whether does gin's output descriptor refers to a terminal. + isTerm bool + // BodySize is the size of the Response Body + BodySize int + // Keys are the keys set on the request's context. + Keys map[string]interface{} +} + +// StatusCodeColor is the ANSI color for appropriately logging http status code to a terminal. +func (p *LogFormatterParams) StatusCodeColor() string { + code := p.StatusCode + + switch { + case code >= http.StatusOK && code < http.StatusMultipleChoices: + return green + case code >= http.StatusMultipleChoices && code < http.StatusBadRequest: + return white + case code >= http.StatusBadRequest && code < http.StatusInternalServerError: + return yellow + default: + return red + } +} + +// MethodColor is the ANSI color for appropriately logging http method to a terminal. 
+func (p *LogFormatterParams) MethodColor() string { + method := p.Method + + switch method { + case http.MethodGet: + return blue + case http.MethodPost: + return cyan + case http.MethodPut: + return yellow + case http.MethodDelete: + return red + case http.MethodPatch: + return green + case http.MethodHead: + return magenta + case http.MethodOptions: + return white + default: + return reset + } +} + +// ResetColor resets all escape attributes. +func (p *LogFormatterParams) ResetColor() string { + return reset +} + +// IsOutputColor indicates whether can colors be outputted to the log. +func (p *LogFormatterParams) IsOutputColor() bool { + return consoleColorMode == forceColor || (consoleColorMode == autoColor && p.isTerm) +} + +// defaultLogFormatter is the default log format function Logger middleware uses. +var defaultLogFormatter = func(param LogFormatterParams) string { + var statusColor, methodColor, resetColor string + if param.IsOutputColor() { + statusColor = param.StatusCodeColor() + methodColor = param.MethodColor() + resetColor = param.ResetColor() + } + + if param.Latency > time.Minute { + // Truncate in a golang < 1.8 safe way + param.Latency = param.Latency - param.Latency%time.Second + } + return fmt.Sprintf("[GIN] %v |%s %3d %s| %13v | %15s |%s %-7s %s %#v\n%s", + param.TimeStamp.Format("2006/01/02 - 15:04:05"), + statusColor, param.StatusCode, resetColor, + param.Latency, + param.ClientIP, + methodColor, param.Method, resetColor, + param.Path, + param.ErrorMessage, + ) +} + +// DisableConsoleColor disables color output in the console. func DisableConsoleColor() { - disableColor = true + consoleColorMode = disableColor } +// ForceConsoleColor force color output in the console. +func ForceConsoleColor() { + consoleColorMode = forceColor +} + +// ErrorLogger returns a handlerfunc for any error type. func ErrorLogger() HandlerFunc { return ErrorLoggerT(ErrorTypeAny) } +// ErrorLoggerT returns a handlerfunc for a given error type. func ErrorLoggerT(typ ErrorType) HandlerFunc { return func(c *Context) { c.Next() @@ -43,20 +178,46 @@ func ErrorLoggerT(typ ErrorType) HandlerFunc { } } -// Logger instances a Logger middleware that will write the logs to gin.DefaultWriter -// By default gin.DefaultWriter = os.Stdout +// Logger instances a Logger middleware that will write the logs to gin.DefaultWriter. +// By default gin.DefaultWriter = os.Stdout. func Logger() HandlerFunc { - return LoggerWithWriter(DefaultWriter) + return LoggerWithConfig(LoggerConfig{}) } -// LoggerWithWriter instance a Logger middleware with the specified writter buffer. +// LoggerWithFormatter instance a Logger middleware with the specified log format function. +func LoggerWithFormatter(f LogFormatter) HandlerFunc { + return LoggerWithConfig(LoggerConfig{ + Formatter: f, + }) +} + +// LoggerWithWriter instance a Logger middleware with the specified writer buffer. // Example: os.Stdout, a file opened in write mode, a socket... func LoggerWithWriter(out io.Writer, notlogged ...string) HandlerFunc { + return LoggerWithConfig(LoggerConfig{ + Output: out, + SkipPaths: notlogged, + }) +} + +// LoggerWithConfig instance a Logger middleware with config. 
+func LoggerWithConfig(conf LoggerConfig) HandlerFunc { + formatter := conf.Formatter + if formatter == nil { + formatter = defaultLogFormatter + } + + out := conf.Output + if out == nil { + out = DefaultWriter + } + + notlogged := conf.SkipPaths + isTerm := true - if w, ok := out.(*os.File); !ok || - (os.Getenv("TERM") == "dumb" || (!isatty.IsTerminal(w.Fd()) && !isatty.IsCygwinTerminal(w.Fd()))) || - disableColor { + if w, ok := out.(*os.File); !ok || os.Getenv("TERM") == "dumb" || + (!isatty.IsTerminal(w.Fd()) && !isatty.IsCygwinTerminal(w.Fd())) { isTerm = false } @@ -74,69 +235,37 @@ func LoggerWithWriter(out io.Writer, notlogged ...string) HandlerFunc { // Start timer start := time.Now() path := c.Request.URL.Path + raw := c.Request.URL.RawQuery // Process request c.Next() // Log only when path is not being skipped if _, ok := skip[path]; !ok { + param := LogFormatterParams{ + Request: c.Request, + isTerm: isTerm, + Keys: c.Keys, + } + // Stop timer - end := time.Now() - latency := end.Sub(start) - - clientIP := c.ClientIP() - method := c.Request.Method - statusCode := c.Writer.Status() - var statusColor, methodColor string - if isTerm { - statusColor = colorForStatus(statusCode) - methodColor = colorForMethod(method) + param.TimeStamp = time.Now() + param.Latency = param.TimeStamp.Sub(start) + + param.ClientIP = c.ClientIP() + param.Method = c.Request.Method + param.StatusCode = c.Writer.Status() + param.ErrorMessage = c.Errors.ByType(ErrorTypePrivate).String() + + param.BodySize = c.Writer.Size() + + if raw != "" { + path = path + "?" + raw } - comment := c.Errors.ByType(ErrorTypePrivate).String() - - fmt.Fprintf(out, "[GIN] %v |%s %3d %s| %13v | %15s |%s %s %-7s %s\n%s", - end.Format("2006/01/02 - 15:04:05"), - statusColor, statusCode, reset, - latency, - clientIP, - methodColor, method, reset, - path, - comment, - ) - } - } -} -func colorForStatus(code int) string { - switch { - case code >= 200 && code < 300: - return green - case code >= 300 && code < 400: - return white - case code >= 400 && code < 500: - return yellow - default: - return red - } -} + param.Path = path -func colorForMethod(method string) string { - switch method { - case "GET": - return blue - case "POST": - return cyan - case "PUT": - return yellow - case "DELETE": - return red - case "PATCH": - return green - case "HEAD": - return magenta - case "OPTIONS": - return white - default: - return reset + fmt.Fprint(out, formatter(param)) + } } } diff --git a/vendor/github.com/gin-gonic/gin/logo.jpg b/vendor/github.com/gin-gonic/gin/logo.jpg deleted file mode 100644 index bb51852e4..000000000 Binary files a/vendor/github.com/gin-gonic/gin/logo.jpg and /dev/null differ diff --git a/vendor/github.com/gin-gonic/gin/mode.go b/vendor/github.com/gin-gonic/gin/mode.go index e24dbdc2c..c8813aff2 100644 --- a/vendor/github.com/gin-gonic/gin/mode.go +++ b/vendor/github.com/gin-gonic/gin/mode.go @@ -11,20 +11,25 @@ import ( "github.com/gin-gonic/gin/binding" ) -const ENV_GIN_MODE = "GIN_MODE" +// EnvGinMode indicates environment name for gin mode. +const EnvGinMode = "GIN_MODE" const ( - DebugMode string = "debug" - ReleaseMode string = "release" - TestMode string = "test" + // DebugMode indicates gin mode is debug. + DebugMode = "debug" + // ReleaseMode indicates gin mode is release. + ReleaseMode = "release" + // TestMode indicates gin mode is test. 
+ TestMode = "test" ) + const ( debugCode = iota releaseCode testCode ) -// DefaultWriter is the default io.Writer used the Gin for debug output and +// DefaultWriter is the default io.Writer used by Gin for debug output and // middleware output like Logger() or Recovery(). // Note that both Logger and Recovery provides custom ways to configure their // output io.Writer. @@ -32,21 +37,24 @@ const ( // import "github.com/mattn/go-colorable" // gin.DefaultWriter = colorable.NewColorableStdout() var DefaultWriter io.Writer = os.Stdout + +// DefaultErrorWriter is the default io.Writer used by Gin to debug errors var DefaultErrorWriter io.Writer = os.Stderr var ginMode = debugCode var modeName = DebugMode func init() { - mode := os.Getenv(ENV_GIN_MODE) - if len(mode) == 0 { - SetMode(DebugMode) - } else { - SetMode(mode) - } + mode := os.Getenv(EnvGinMode) + SetMode(mode) } +// SetMode sets gin mode according to input string. func SetMode(value string) { + if value == "" { + value = DebugMode + } + switch value { case DebugMode: ginMode = debugCode @@ -55,15 +63,30 @@ func SetMode(value string) { case TestMode: ginMode = testCode default: - panic("gin mode unknown: " + value) + panic("gin mode unknown: " + value + " (available mode: debug release test)") } + modeName = value } +// DisableBindValidation closes the default validator. func DisableBindValidation() { binding.Validator = nil } +// EnableJsonDecoderUseNumber sets true for binding.EnableDecoderUseNumber to +// call the UseNumber method on the JSON Decoder instance. +func EnableJsonDecoderUseNumber() { + binding.EnableDecoderUseNumber = true +} + +// EnableJsonDecoderDisallowUnknownFields sets true for binding.EnableDecoderDisallowUnknownFields to +// call the DisallowUnknownFields method on the JSON Decoder instance. +func EnableJsonDecoderDisallowUnknownFields() { + binding.EnableDecoderDisallowUnknownFields = true +} + +// Mode returns currently gin mode. func Mode() string { return modeName } diff --git a/vendor/github.com/gin-gonic/gin/path.go b/vendor/github.com/gin-gonic/gin/path.go index d7e7458b5..d42d6b9d0 100644 --- a/vendor/github.com/gin-gonic/gin/path.go +++ b/vendor/github.com/gin-gonic/gin/path.go @@ -5,7 +5,7 @@ package gin -// CleanPath is the URL version of path.Clean, it returns a canonical URL path +// cleanPath is the URL version of path.Clean, it returns a canonical URL path // for p, eliminating . and .. elements. // // The following rules are applied iteratively until no further processing can @@ -17,15 +17,19 @@ package gin // 4. Eliminate .. elements that begin a rooted path: // that is, replace "/.." by "/" at the beginning of a path. // -// If the result of this process is an empty string, "/" is returned +// If the result of this process is an empty string, "/" is returned. func cleanPath(p string) string { + const stackBufSize = 128 // Turn empty string into "/" if p == "" { return "/" } + // Reasonably sized buffer on stack to avoid allocations in the common case. + // If a larger buffer is required, it gets allocated dynamically. + buf := make([]byte, 0, stackBufSize) + n := len(p) - var buf []byte // Invariants: // reading from path; r is index of next byte to process. 
@@ -37,15 +41,21 @@ func cleanPath(p string) string { if p[0] != '/' { r = 0 - buf = make([]byte, n+1) + + if n+1 > stackBufSize { + buf = make([]byte, n+1) + } else { + buf = buf[:n+1] + } buf[0] = '/' } - trailing := n > 2 && p[n-1] == '/' + trailing := n > 1 && p[n-1] == '/' // A bit more clunky without a 'lazybuf' like the path package, but the loop - // gets completely inlined (bufApp). So in contrast to the path package this - // loop has no expensive function calls (except 1x make) + // gets completely inlined (bufApp calls). + // loop has no expensive function calls (except 1x make) // So in contrast to the path package this loop has no expensive function + // calls (except make, if needed). for r < n { switch { @@ -59,17 +69,17 @@ func cleanPath(p string) string { case p[r] == '.' && p[r+1] == '/': // . element - r++ + r += 2 case p[r] == '.' && p[r+1] == '.' && (r+2 == n || p[r+2] == '/'): // .. element: remove to last / - r += 2 + r += 3 if w > 1 { // can backtrack w-- - if buf == nil { + if len(buf) == 0 { for w > 1 && p[w] != '/' { w-- } @@ -81,14 +91,14 @@ func cleanPath(p string) string { } default: - // real path element. - // add slash if needed + // Real path element. + // Add slash if needed if w > 1 { bufApp(&buf, p, w, '/') w++ } - // copy element + // Copy element for r < n && p[r] != '/' { bufApp(&buf, p, w, p[r]) w++ @@ -97,27 +107,44 @@ func cleanPath(p string) string { } } - // re-append trailing slash + // Re-append trailing slash if trailing && w > 1 { bufApp(&buf, p, w, '/') w++ } - if buf == nil { + // If the original string was not modified (or only shortened at the end), + // return the respective substring of the original string. + // Otherwise return a new string from the buffer. + if len(buf) == 0 { return p[:w] } return string(buf[:w]) } -// internal helper to lazily create a buffer if necessary +// Internal helper to lazily create a buffer if necessary. +// Calls to this function get inlined. func bufApp(buf *[]byte, s string, w int, c byte) { - if *buf == nil { + b := *buf + if len(b) == 0 { + // No modification of the original string so far. + // If the next character is the same as in the original string, we do + // not yet have to allocate a buffer. if s[w] == c { return } - *buf = make([]byte, len(s)) - copy(*buf, s[:w]) + // Otherwise use either the stack buffer, if it is large enough, or + // allocate a new buffer on the heap, and copy all previous characters. + length := len(s) + if length > cap(b) { + *buf = make([]byte, length) + } else { + *buf = (*buf)[:length] + } + b = *buf + + copy(b, s[:w]) } - (*buf)[w] = c + b[w] = c } diff --git a/vendor/github.com/gin-gonic/gin/recovery.go b/vendor/github.com/gin-gonic/gin/recovery.go index c502f3553..563f5aaa8 100644 --- a/vendor/github.com/gin-gonic/gin/recovery.go +++ b/vendor/github.com/gin-gonic/gin/recovery.go @@ -10,8 +10,13 @@ import ( "io" "io/ioutil" "log" + "net" + "net/http" "net/http/httputil" + "os" "runtime" + "strings" + "time" ) var ( @@ -21,12 +26,29 @@ var ( slash = []byte("/") ) +// RecoveryFunc defines the function passable to CustomRecovery. +type RecoveryFunc func(c *Context, err interface{}) + // Recovery returns a middleware that recovers from any panics and writes a 500 if there was one. func Recovery() HandlerFunc { return RecoveryWithWriter(DefaultErrorWriter) } -func RecoveryWithWriter(out io.Writer) HandlerFunc { +//CustomRecovery returns a middleware that recovers from any panics and calls the provided handle func to handle it. 
+func CustomRecovery(handle RecoveryFunc) HandlerFunc { + return RecoveryWithWriter(DefaultErrorWriter, handle) +} + +// RecoveryWithWriter returns a middleware for a given writer that recovers from any panics and writes a 500 if there was one. +func RecoveryWithWriter(out io.Writer, recovery ...RecoveryFunc) HandlerFunc { + if len(recovery) > 0 { + return CustomRecoveryWithWriter(out, recovery[0]) + } + return CustomRecoveryWithWriter(out, defaultHandleRecovery) +} + +// CustomRecoveryWithWriter returns a middleware for a given writer that recovers from any panics and calls the provided handle func to handle it. +func CustomRecoveryWithWriter(out io.Writer, handle RecoveryFunc) HandlerFunc { var logger *log.Logger if out != nil { logger = log.New(out, "\n\n\x1b[31m", log.LstdFlags) @@ -34,19 +56,55 @@ func RecoveryWithWriter(out io.Writer) HandlerFunc { return func(c *Context) { defer func() { if err := recover(); err != nil { + // Check for a broken connection, as it is not really a + // condition that warrants a panic stack trace. + var brokenPipe bool + if ne, ok := err.(*net.OpError); ok { + if se, ok := ne.Err.(*os.SyscallError); ok { + if strings.Contains(strings.ToLower(se.Error()), "broken pipe") || strings.Contains(strings.ToLower(se.Error()), "connection reset by peer") { + brokenPipe = true + } + } + } if logger != nil { stack := stack(3) - httprequest, _ := httputil.DumpRequest(c.Request, false) - logger.Printf("[Recovery] panic recovered:\n%s\n%s\n%s%s", string(httprequest), err, stack, reset) + httpRequest, _ := httputil.DumpRequest(c.Request, false) + headers := strings.Split(string(httpRequest), "\r\n") + for idx, header := range headers { + current := strings.Split(header, ":") + if current[0] == "Authorization" { + headers[idx] = current[0] + ": *" + } + } + headersToStr := strings.Join(headers, "\r\n") + if brokenPipe { + logger.Printf("%s\n%s%s", err, headersToStr, reset) + } else if IsDebugging() { + logger.Printf("[Recovery] %s panic recovered:\n%s\n%s\n%s%s", + timeFormat(time.Now()), headersToStr, err, stack, reset) + } else { + logger.Printf("[Recovery] %s panic recovered:\n%s\n%s%s", + timeFormat(time.Now()), err, stack, reset) + } + } + if brokenPipe { + // If the connection is dead, we can't write a status to it. + c.Error(err.(error)) // nolint: errcheck + c.Abort() + } else { + handle(c, err) } - c.AbortWithStatus(500) } }() c.Next() } } -// stack returns a nicely formated stack frame, skipping skip frames +func defaultHandleRecovery(c *Context, err interface{}) { + c.AbortWithStatus(http.StatusInternalServerError) +} + +// stack returns a nicely formatted stack frame, skipping skip frames. func stack(skip int) []byte { buf := new(bytes.Buffer) // the returned data // As we loop, we open files and read them. These variables record the currently @@ -97,8 +155,8 @@ func function(pc uintptr) []byte { // *T.ptrmethod // Also the package path might contains dot (e.g. 
code.google.com/...), // so first eliminate the path prefix - if lastslash := bytes.LastIndex(name, slash); lastslash >= 0 { - name = name[lastslash+1:] + if lastSlash := bytes.LastIndex(name, slash); lastSlash >= 0 { + name = name[lastSlash+1:] } if period := bytes.Index(name, dot); period >= 0 { name = name[period+1:] @@ -106,3 +164,8 @@ func function(pc uintptr) []byte { name = bytes.Replace(name, centerDot, dot, -1) return name } + +func timeFormat(t time.Time) string { + timeString := t.Format("2006/01/02 - 15:04:05") + return timeString +} diff --git a/vendor/github.com/gin-gonic/gin/render/data.go b/vendor/github.com/gin-gonic/gin/render/data.go index c296042c9..6ba657ba0 100644 --- a/vendor/github.com/gin-gonic/gin/render/data.go +++ b/vendor/github.com/gin-gonic/gin/render/data.go @@ -6,18 +6,20 @@ package render import "net/http" +// Data contains ContentType and bytes data. type Data struct { ContentType string Data []byte } -// Render (Data) writes data with custom ContentType +// Render (Data) writes data with custom ContentType. func (r Data) Render(w http.ResponseWriter) (err error) { r.WriteContentType(w) _, err = w.Write(r.Data) return } +// WriteContentType (Data) writes custom ContentType. func (r Data) WriteContentType(w http.ResponseWriter) { writeContentType(w, []string{r.ContentType}) } diff --git a/vendor/github.com/gin-gonic/gin/render/html.go b/vendor/github.com/gin-gonic/gin/render/html.go index cf91219cc..6696ece99 100644 --- a/vendor/github.com/gin-gonic/gin/render/html.go +++ b/vendor/github.com/gin-gonic/gin/render/html.go @@ -9,37 +9,44 @@ import ( "net/http" ) -type ( - Delims struct { - Left string - Right string - } +// Delims represents a set of Left and Right delimiters for HTML template rendering. +type Delims struct { + // Left delimiter, defaults to {{. + Left string + // Right delimiter, defaults to }}. + Right string +} - HTMLRender interface { - Instance(string, interface{}) Render - } +// HTMLRender interface is to be implemented by HTMLProduction and HTMLDebug. +type HTMLRender interface { + // Instance returns an HTML instance. + Instance(string, interface{}) Render +} - HTMLProduction struct { - Template *template.Template - Delims Delims - } +// HTMLProduction contains template reference and its delims. +type HTMLProduction struct { + Template *template.Template + Delims Delims +} - HTMLDebug struct { - Files []string - Glob string - Delims Delims - FuncMap template.FuncMap - } +// HTMLDebug contains template delims and pattern and function with file list. +type HTMLDebug struct { + Files []string + Glob string + Delims Delims + FuncMap template.FuncMap +} - HTML struct { - Template *template.Template - Name string - Data interface{} - } -) +// HTML contains template reference and its name with given interface object. +type HTML struct { + Template *template.Template + Name string + Data interface{} +} var htmlContentType = []string{"text/html; charset=utf-8"} +// Instance (HTMLProduction) returns an HTML instance which it realizes Render interface. func (r HTMLProduction) Instance(name string, data interface{}) Render { return HTML{ Template: r.Template, @@ -48,6 +55,7 @@ func (r HTMLProduction) Instance(name string, data interface{}) Render { } } +// Instance (HTMLDebug) returns an HTML instance which it realizes Render interface. 
func (r HTMLDebug) Instance(name string, data interface{}) Render { return HTML{ Template: r.loadTemplate(), @@ -62,21 +70,23 @@ func (r HTMLDebug) loadTemplate() *template.Template { if len(r.Files) > 0 { return template.Must(template.New("").Delims(r.Delims.Left, r.Delims.Right).Funcs(r.FuncMap).ParseFiles(r.Files...)) } - if len(r.Glob) > 0 { + if r.Glob != "" { return template.Must(template.New("").Delims(r.Delims.Left, r.Delims.Right).Funcs(r.FuncMap).ParseGlob(r.Glob)) } panic("the HTML debug render was created without files or glob pattern") } +// Render (HTML) executes template and writes its result with custom ContentType for response. func (r HTML) Render(w http.ResponseWriter) error { r.WriteContentType(w) - if len(r.Name) == 0 { + if r.Name == "" { return r.Template.Execute(w, r.Data) } return r.Template.ExecuteTemplate(w, r.Name, r.Data) } +// WriteContentType (HTML) writes HTML ContentType. func (r HTML) WriteContentType(w http.ResponseWriter) { writeContentType(w, htmlContentType) } diff --git a/vendor/github.com/gin-gonic/gin/render/json.go b/vendor/github.com/gin-gonic/gin/render/json.go index 3ee8b1326..418630939 100644 --- a/vendor/github.com/gin-gonic/gin/render/json.go +++ b/vendor/github.com/gin-gonic/gin/render/json.go @@ -5,22 +5,52 @@ package render import ( - "encoding/json" + "bytes" + "fmt" + "html/template" "net/http" + + "github.com/gin-gonic/gin/internal/bytesconv" + "github.com/gin-gonic/gin/internal/json" ) -type ( - JSON struct { - Data interface{} - } +// JSON contains the given interface object. +type JSON struct { + Data interface{} +} - IndentedJSON struct { - Data interface{} - } -) +// IndentedJSON contains the given interface object. +type IndentedJSON struct { + Data interface{} +} + +// SecureJSON contains the given interface object and its prefix. +type SecureJSON struct { + Prefix string + Data interface{} +} + +// JsonpJSON contains the given interface object its callback. +type JsonpJSON struct { + Callback string + Data interface{} +} + +// AsciiJSON contains the given interface object. +type AsciiJSON struct { + Data interface{} +} + +// PureJSON contains the given interface object. +type PureJSON struct { + Data interface{} +} var jsonContentType = []string{"application/json; charset=utf-8"} +var jsonpContentType = []string{"application/javascript; charset=utf-8"} +var jsonAsciiContentType = []string{"application/json"} +// Render (JSON) writes data with custom ContentType. func (r JSON) Render(w http.ResponseWriter) (err error) { if err = WriteJSON(w, r.Data); err != nil { panic(err) @@ -28,30 +58,136 @@ func (r JSON) Render(w http.ResponseWriter) (err error) { return } +// WriteContentType (JSON) writes JSON ContentType. func (r JSON) WriteContentType(w http.ResponseWriter) { writeContentType(w, jsonContentType) } +// WriteJSON marshals the given interface object and writes it with custom ContentType. func WriteJSON(w http.ResponseWriter, obj interface{}) error { writeContentType(w, jsonContentType) jsonBytes, err := json.Marshal(obj) if err != nil { return err } - w.Write(jsonBytes) - return nil + _, err = w.Write(jsonBytes) + return err } +// Render (IndentedJSON) marshals the given interface object and writes it with custom ContentType. func (r IndentedJSON) Render(w http.ResponseWriter) error { r.WriteContentType(w) jsonBytes, err := json.MarshalIndent(r.Data, "", " ") if err != nil { return err } - w.Write(jsonBytes) - return nil + _, err = w.Write(jsonBytes) + return err } +// WriteContentType (IndentedJSON) writes JSON ContentType. 
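The HTMLDebug and HTMLProduction renderers above are normally reached through the engine's template helpers rather than constructed directly. A small sketch under that assumption; the glob and template name are illustrative and this block is not part of the patch.

```go
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

func main() {
	r := gin.Default()

	// In debug mode gin builds an HTMLDebug renderer (templates re-parsed on
	// each Instance call); in release mode it parses once into HTMLProduction.
	r.LoadHTMLGlob("templates/*")

	r.GET("/", func(c *gin.Context) {
		// Executes templates/index.tmpl with the given data via render.HTML.
		c.HTML(http.StatusOK, "index.tmpl", gin.H{"title": "hello"})
	})

	r.Run(":8080")
}
```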
func (r IndentedJSON) WriteContentType(w http.ResponseWriter) { writeContentType(w, jsonContentType) } + +// Render (SecureJSON) marshals the given interface object and writes it with custom ContentType. +func (r SecureJSON) Render(w http.ResponseWriter) error { + r.WriteContentType(w) + jsonBytes, err := json.Marshal(r.Data) + if err != nil { + return err + } + // if the jsonBytes is array values + if bytes.HasPrefix(jsonBytes, bytesconv.StringToBytes("[")) && bytes.HasSuffix(jsonBytes, + bytesconv.StringToBytes("]")) { + _, err = w.Write(bytesconv.StringToBytes(r.Prefix)) + if err != nil { + return err + } + } + _, err = w.Write(jsonBytes) + return err +} + +// WriteContentType (SecureJSON) writes JSON ContentType. +func (r SecureJSON) WriteContentType(w http.ResponseWriter) { + writeContentType(w, jsonContentType) +} + +// Render (JsonpJSON) marshals the given interface object and writes it and its callback with custom ContentType. +func (r JsonpJSON) Render(w http.ResponseWriter) (err error) { + r.WriteContentType(w) + ret, err := json.Marshal(r.Data) + if err != nil { + return err + } + + if r.Callback == "" { + _, err = w.Write(ret) + return err + } + + callback := template.JSEscapeString(r.Callback) + _, err = w.Write(bytesconv.StringToBytes(callback)) + if err != nil { + return err + } + _, err = w.Write(bytesconv.StringToBytes("(")) + if err != nil { + return err + } + _, err = w.Write(ret) + if err != nil { + return err + } + _, err = w.Write(bytesconv.StringToBytes(");")) + if err != nil { + return err + } + + return nil +} + +// WriteContentType (JsonpJSON) writes Javascript ContentType. +func (r JsonpJSON) WriteContentType(w http.ResponseWriter) { + writeContentType(w, jsonpContentType) +} + +// Render (AsciiJSON) marshals the given interface object and writes it with custom ContentType. +func (r AsciiJSON) Render(w http.ResponseWriter) (err error) { + r.WriteContentType(w) + ret, err := json.Marshal(r.Data) + if err != nil { + return err + } + + var buffer bytes.Buffer + for _, r := range bytesconv.BytesToString(ret) { + cvt := string(r) + if r >= 128 { + cvt = fmt.Sprintf("\\u%04x", int64(r)) + } + buffer.WriteString(cvt) + } + + _, err = w.Write(buffer.Bytes()) + return err +} + +// WriteContentType (AsciiJSON) writes JSON ContentType. +func (r AsciiJSON) WriteContentType(w http.ResponseWriter) { + writeContentType(w, jsonAsciiContentType) +} + +// Render (PureJSON) writes custom ContentType and encodes the given interface object. +func (r PureJSON) Render(w http.ResponseWriter) error { + r.WriteContentType(w) + encoder := json.NewEncoder(w) + encoder.SetEscapeHTML(false) + return encoder.Encode(r.Data) +} + +// WriteContentType (PureJSON) writes custom ContentType. +func (r PureJSON) WriteContentType(w http.ResponseWriter) { + writeContentType(w, jsonContentType) +} diff --git a/vendor/github.com/gin-gonic/gin/render/msgpack.go b/vendor/github.com/gin-gonic/gin/render/msgpack.go index e6c13e588..6ef5b6e51 100644 --- a/vendor/github.com/gin-gonic/gin/render/msgpack.go +++ b/vendor/github.com/gin-gonic/gin/render/msgpack.go @@ -2,6 +2,9 @@ // Use of this source code is governed by a MIT style // license that can be found in the LICENSE file. +//go:build !nomsgpack +// +build !nomsgpack + package render import ( @@ -10,22 +13,30 @@ import ( "github.com/ugorji/go/codec" ) +var ( + _ Render = MsgPack{} +) + +// MsgPack contains the given interface object. 
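What the new JSON renderers produce is easiest to see by driving them directly against an httptest recorder; in handlers they are usually reached through the matching Context helpers (c.SecureJSON, c.JSONP, c.PureJSON). The payloads below are purely illustrative.

```go
package main

import (
	"fmt"
	"net/http/httptest"

	"github.com/gin-gonic/gin/render"
)

func main() {
	data := []string{"a", "b"} // a top-level JSON array, the case SecureJSON guards against

	// SecureJSON prefixes array payloads so they cannot be executed as a script.
	w := httptest.NewRecorder()
	_ = render.SecureJSON{Prefix: "while(1);", Data: data}.Render(w)
	fmt.Println(w.Body.String()) // while(1);["a","b"]

	// JsonpJSON wraps the payload in the supplied callback.
	w = httptest.NewRecorder()
	_ = render.JsonpJSON{Callback: "cb", Data: data}.Render(w)
	fmt.Println(w.Body.String()) // cb(["a","b"]);

	// PureJSON encodes without escaping HTML characters.
	w = httptest.NewRecorder()
	_ = render.PureJSON{Data: map[string]string{"html": "<b>hi</b>"}}.Render(w)
	fmt.Println(w.Body.String()) // {"html":"<b>hi</b>"}
}
```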
type MsgPack struct { Data interface{} } var msgpackContentType = []string{"application/msgpack; charset=utf-8"} +// WriteContentType (MsgPack) writes MsgPack ContentType. func (r MsgPack) WriteContentType(w http.ResponseWriter) { writeContentType(w, msgpackContentType) } +// Render (MsgPack) encodes the given interface object and writes data with custom ContentType. func (r MsgPack) Render(w http.ResponseWriter) error { return WriteMsgPack(w, r.Data) } +// WriteMsgPack writes MsgPack ContentType and encodes the given interface object. func WriteMsgPack(w http.ResponseWriter, obj interface{}) error { writeContentType(w, msgpackContentType) - var h codec.Handle = new(codec.MsgpackHandle) - return codec.NewEncoder(w, h).Encode(obj) + var mh codec.MsgpackHandle + return codec.NewEncoder(w, &mh).Encode(obj) } diff --git a/vendor/github.com/gin-gonic/gin/render/protobuf.go b/vendor/github.com/gin-gonic/gin/render/protobuf.go new file mode 100644 index 000000000..15aca9959 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/render/protobuf.go @@ -0,0 +1,36 @@ +// Copyright 2018 Gin Core Team. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package render + +import ( + "net/http" + + "github.com/golang/protobuf/proto" +) + +// ProtoBuf contains the given interface object. +type ProtoBuf struct { + Data interface{} +} + +var protobufContentType = []string{"application/x-protobuf"} + +// Render (ProtoBuf) marshals the given interface object and writes data with custom ContentType. +func (r ProtoBuf) Render(w http.ResponseWriter) error { + r.WriteContentType(w) + + bytes, err := proto.Marshal(r.Data.(proto.Message)) + if err != nil { + return err + } + + _, err = w.Write(bytes) + return err +} + +// WriteContentType (ProtoBuf) writes ProtoBuf ContentType. +func (r ProtoBuf) WriteContentType(w http.ResponseWriter) { + writeContentType(w, protobufContentType) +} diff --git a/vendor/github.com/gin-gonic/gin/render/reader.go b/vendor/github.com/gin-gonic/gin/render/reader.go new file mode 100644 index 000000000..d5282e492 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/render/reader.go @@ -0,0 +1,48 @@ +// Copyright 2018 Gin Core Team. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package render + +import ( + "io" + "net/http" + "strconv" +) + +// Reader contains the IO reader and its length, and custom ContentType and other headers. +type Reader struct { + ContentType string + ContentLength int64 + Reader io.Reader + Headers map[string]string +} + +// Render (Reader) writes data with custom ContentType and headers. +func (r Reader) Render(w http.ResponseWriter) (err error) { + r.WriteContentType(w) + if r.ContentLength >= 0 { + if r.Headers == nil { + r.Headers = map[string]string{} + } + r.Headers["Content-Length"] = strconv.FormatInt(r.ContentLength, 10) + } + r.writeHeaders(w, r.Headers) + _, err = io.Copy(w, r.Reader) + return +} + +// WriteContentType (Reader) writes custom ContentType. +func (r Reader) WriteContentType(w http.ResponseWriter) { + writeContentType(w, []string{r.ContentType}) +} + +// writeHeaders writes custom Header. 
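The new Reader renderer streams an io.Reader with an explicit length plus optional extra headers (which are only applied when not already set on the response); in gin it is normally reached through c.DataFromReader. A short sketch with illustrative values, separate from the patch itself.

```go
package main

import (
	"fmt"
	"net/http/httptest"
	"strings"

	"github.com/gin-gonic/gin/render"
)

func main() {
	body := "hello, world"

	r := render.Reader{
		ContentType:   "text/plain",
		ContentLength: int64(len(body)),
		Reader:        strings.NewReader(body),
		// Extra headers are written only if the response doesn't set them already.
		Headers: map[string]string{"Content-Disposition": `attachment; filename="hello.txt"`},
	}

	w := httptest.NewRecorder()
	_ = r.Render(w)

	fmt.Println(w.Header().Get("Content-Length"))      // 12
	fmt.Println(w.Header().Get("Content-Disposition")) // attachment; filename="hello.txt"
	fmt.Println(w.Body.String())                       // hello, world
}
```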
+func (r Reader) writeHeaders(w http.ResponseWriter, headers map[string]string) { + header := w.Header() + for k, v := range headers { + if header.Get(k) == "" { + header.Set(k, v) + } + } +} diff --git a/vendor/github.com/gin-gonic/gin/render/redirect.go b/vendor/github.com/gin-gonic/gin/render/redirect.go index f874a3515..c006691ca 100644 --- a/vendor/github.com/gin-gonic/gin/render/redirect.go +++ b/vendor/github.com/gin-gonic/gin/render/redirect.go @@ -9,18 +9,21 @@ import ( "net/http" ) +// Redirect contains the http request reference and redirects status code and location. type Redirect struct { Code int Request *http.Request Location string } +// Render (Redirect) redirects the http request to new location and writes redirect response. func (r Redirect) Render(w http.ResponseWriter) error { - if (r.Code < 300 || r.Code > 308) && r.Code != 201 { + if (r.Code < http.StatusMultipleChoices || r.Code > http.StatusPermanentRedirect) && r.Code != http.StatusCreated { panic(fmt.Sprintf("Cannot redirect with status code %d", r.Code)) } http.Redirect(w, r.Request, r.Location, r.Code) return nil } +// WriteContentType (Redirect) don't write any ContentType. func (r Redirect) WriteContentType(http.ResponseWriter) {} diff --git a/vendor/github.com/gin-gonic/gin/render/render.go b/vendor/github.com/gin-gonic/gin/render/render.go index 462914214..bcd568bfb 100644 --- a/vendor/github.com/gin-gonic/gin/render/render.go +++ b/vendor/github.com/gin-gonic/gin/render/render.go @@ -6,14 +6,19 @@ package render import "net/http" +// Render interface is to be implemented by JSON, XML, HTML, YAML and so on. type Render interface { + // Render writes data with custom ContentType. Render(http.ResponseWriter) error + // WriteContentType writes custom ContentType. WriteContentType(w http.ResponseWriter) } var ( _ Render = JSON{} _ Render = IndentedJSON{} + _ Render = SecureJSON{} + _ Render = JsonpJSON{} _ Render = XML{} _ Render = String{} _ Render = Redirect{} @@ -22,8 +27,9 @@ var ( _ HTMLRender = HTMLDebug{} _ HTMLRender = HTMLProduction{} _ Render = YAML{} - _ Render = MsgPack{} - _ Render = MsgPack{} + _ Render = Reader{} + _ Render = AsciiJSON{} + _ Render = ProtoBuf{} ) func writeContentType(w http.ResponseWriter, value []string) { diff --git a/vendor/github.com/gin-gonic/gin/render/text.go b/vendor/github.com/gin-gonic/gin/render/text.go index 74cd26bee..461b720af 100644 --- a/vendor/github.com/gin-gonic/gin/render/text.go +++ b/vendor/github.com/gin-gonic/gin/render/text.go @@ -6,10 +6,12 @@ package render import ( "fmt" - "io" "net/http" + + "github.com/gin-gonic/gin/internal/bytesconv" ) +// String contains the given interface object slice and its format. type String struct { Format string Data []interface{} @@ -17,20 +19,23 @@ type String struct { var plainContentType = []string{"text/plain; charset=utf-8"} +// Render (String) writes data with custom ContentType. func (r String) Render(w http.ResponseWriter) error { - WriteString(w, r.Format, r.Data) - return nil + return WriteString(w, r.Format, r.Data) } +// WriteContentType (String) writes Plain ContentType. func (r String) WriteContentType(w http.ResponseWriter) { writeContentType(w, plainContentType) } -func WriteString(w http.ResponseWriter, format string, data []interface{}) { +// WriteString writes data according to its format and write custom ContentType. +func WriteString(w http.ResponseWriter, format string, data []interface{}) (err error) { writeContentType(w, plainContentType) if len(data) > 0 { - fmt.Fprintf(w, format, data...) 
- } else { - io.WriteString(w, format) + _, err = fmt.Fprintf(w, format, data...) + return } + _, err = w.Write(bytesconv.StringToBytes(format)) + return } diff --git a/vendor/github.com/gin-gonic/gin/render/xml.go b/vendor/github.com/gin-gonic/gin/render/xml.go index cff1ac3e8..cc5390a2d 100644 --- a/vendor/github.com/gin-gonic/gin/render/xml.go +++ b/vendor/github.com/gin-gonic/gin/render/xml.go @@ -9,17 +9,20 @@ import ( "net/http" ) +// XML contains the given interface object. type XML struct { Data interface{} } var xmlContentType = []string{"application/xml; charset=utf-8"} +// Render (XML) encodes the given interface object and writes data with custom ContentType. func (r XML) Render(w http.ResponseWriter) error { r.WriteContentType(w) return xml.NewEncoder(w).Encode(r.Data) } +// WriteContentType (XML) writes XML ContentType for response. func (r XML) WriteContentType(w http.ResponseWriter) { writeContentType(w, xmlContentType) } diff --git a/vendor/github.com/gin-gonic/gin/render/yaml.go b/vendor/github.com/gin-gonic/gin/render/yaml.go index 25d0ebd47..0df783608 100644 --- a/vendor/github.com/gin-gonic/gin/render/yaml.go +++ b/vendor/github.com/gin-gonic/gin/render/yaml.go @@ -10,12 +10,14 @@ import ( "gopkg.in/yaml.v2" ) +// YAML contains the given interface object. type YAML struct { Data interface{} } var yamlContentType = []string{"application/x-yaml; charset=utf-8"} +// Render (YAML) marshals the given interface object and writes data with custom ContentType. func (r YAML) Render(w http.ResponseWriter) error { r.WriteContentType(w) @@ -24,10 +26,11 @@ func (r YAML) Render(w http.ResponseWriter) error { return err } - w.Write(bytes) - return nil + _, err = w.Write(bytes) + return err } +// WriteContentType (YAML) writes YAML ContentType for response. func (r YAML) WriteContentType(w http.ResponseWriter) { writeContentType(w, yamlContentType) } diff --git a/vendor/github.com/gin-gonic/gin/response_writer.go b/vendor/github.com/gin-gonic/gin/response_writer.go index fcbe230d0..26826689a 100644 --- a/vendor/github.com/gin-gonic/gin/response_writer.go +++ b/vendor/github.com/gin-gonic/gin/response_writer.go @@ -13,39 +13,41 @@ import ( const ( noWritten = -1 - defaultStatus = 200 + defaultStatus = http.StatusOK ) -type ( - ResponseWriter interface { - http.ResponseWriter - http.Hijacker - http.Flusher - http.CloseNotifier +// ResponseWriter ... +type ResponseWriter interface { + http.ResponseWriter + http.Hijacker + http.Flusher + http.CloseNotifier - // Returns the HTTP response status code of the current request. - Status() int + // Returns the HTTP response status code of the current request. + Status() int - // Returns the number of bytes already written into the response http body. - // See Written() - Size() int + // Returns the number of bytes already written into the response http body. + // See Written() + Size() int - // Writes the string into the response body. - WriteString(string) (int, error) + // Writes the string into the response body. + WriteString(string) (int, error) - // Returns true if the response body was already written. - Written() bool + // Returns true if the response body was already written. + Written() bool - // Forces to write the http header (status code + headers). - WriteHeaderNow() - } + // Forces to write the http header (status code + headers). 
+ WriteHeaderNow() - responseWriter struct { - http.ResponseWriter - size int - status int - } -) + // get the http.Pusher for server push + Pusher() http.Pusher +} + +type responseWriter struct { + http.ResponseWriter + size int + status int +} var _ ResponseWriter = &responseWriter{} @@ -97,7 +99,7 @@ func (w *responseWriter) Written() bool { return w.size != noWritten } -// Implements the http.Hijacker interface +// Hijack implements the http.Hijacker interface. func (w *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { if w.size < 0 { w.size = 0 @@ -105,12 +107,20 @@ func (w *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { return w.ResponseWriter.(http.Hijacker).Hijack() } -// Implements the http.CloseNotify interface +// CloseNotify implements the http.CloseNotify interface. func (w *responseWriter) CloseNotify() <-chan bool { return w.ResponseWriter.(http.CloseNotifier).CloseNotify() } -// Implements the http.Flush interface +// Flush implements the http.Flush interface. func (w *responseWriter) Flush() { + w.WriteHeaderNow() w.ResponseWriter.(http.Flusher).Flush() } + +func (w *responseWriter) Pusher() (pusher http.Pusher) { + if pusher, ok := w.ResponseWriter.(http.Pusher); ok { + return pusher + } + return nil +} diff --git a/vendor/github.com/gin-gonic/gin/routergroup.go b/vendor/github.com/gin-gonic/gin/routergroup.go index f22729bbd..15d9930d3 100644 --- a/vendor/github.com/gin-gonic/gin/routergroup.go +++ b/vendor/github.com/gin-gonic/gin/routergroup.go @@ -11,50 +11,50 @@ import ( "strings" ) -type ( - IRouter interface { - IRoutes - Group(string, ...HandlerFunc) *RouterGroup - } +// IRouter defines all router handle interface includes single and group router. +type IRouter interface { + IRoutes + Group(string, ...HandlerFunc) *RouterGroup +} - IRoutes interface { - Use(...HandlerFunc) IRoutes - - Handle(string, string, ...HandlerFunc) IRoutes - Any(string, ...HandlerFunc) IRoutes - GET(string, ...HandlerFunc) IRoutes - POST(string, ...HandlerFunc) IRoutes - DELETE(string, ...HandlerFunc) IRoutes - PATCH(string, ...HandlerFunc) IRoutes - PUT(string, ...HandlerFunc) IRoutes - OPTIONS(string, ...HandlerFunc) IRoutes - HEAD(string, ...HandlerFunc) IRoutes - - StaticFile(string, string) IRoutes - Static(string, string) IRoutes - StaticFS(string, http.FileSystem) IRoutes - } +// IRoutes defines all router handle interface. +type IRoutes interface { + Use(...HandlerFunc) IRoutes - // RouterGroup is used internally to configure router, a RouterGroup is associated with a prefix - // and an array of handlers (middleware) - RouterGroup struct { - Handlers HandlersChain - basePath string - engine *Engine - root bool - } -) + Handle(string, string, ...HandlerFunc) IRoutes + Any(string, ...HandlerFunc) IRoutes + GET(string, ...HandlerFunc) IRoutes + POST(string, ...HandlerFunc) IRoutes + DELETE(string, ...HandlerFunc) IRoutes + PATCH(string, ...HandlerFunc) IRoutes + PUT(string, ...HandlerFunc) IRoutes + OPTIONS(string, ...HandlerFunc) IRoutes + HEAD(string, ...HandlerFunc) IRoutes + + StaticFile(string, string) IRoutes + Static(string, string) IRoutes + StaticFS(string, http.FileSystem) IRoutes +} + +// RouterGroup is used internally to configure router, a RouterGroup is associated with +// a prefix and an array of handlers (middleware). +type RouterGroup struct { + Handlers HandlersChain + basePath string + engine *Engine + root bool +} var _ IRouter = &RouterGroup{} -// Use adds middleware to the group, see example code in github. 
+// Use adds middleware to the group, see example code in GitHub. func (group *RouterGroup) Use(middleware ...HandlerFunc) IRoutes { group.Handlers = append(group.Handlers, middleware...) return group.returnObj() } -// Group creates a new router group. You should add all the routes that have common middlwares or the same path prefix. -// For example, all the routes that use a common middlware for authorization could be grouped. +// Group creates a new router group. You should add all the routes that have common middlewares or the same path prefix. +// For example, all the routes that use a common middleware for authorization could be grouped. func (group *RouterGroup) Group(relativePath string, handlers ...HandlerFunc) *RouterGroup { return &RouterGroup{ Handlers: group.combineHandlers(handlers), @@ -63,6 +63,8 @@ func (group *RouterGroup) Group(relativePath string, handlers ...HandlerFunc) *R } } +// BasePath returns the base path of router group. +// For example, if v := router.Group("/rest/n/v1/api"), v.BasePath() is "/rest/n/v1/api". func (group *RouterGroup) BasePath() string { return group.basePath } @@ -76,7 +78,7 @@ func (group *RouterGroup) handle(httpMethod, relativePath string, handlers Handl // Handle registers a new request handle and middleware with the given path and method. // The last handler should be the real handler, the other ones should be middleware that can and should be shared among different routes. -// See the example code in github. +// See the example code in GitHub. // // For GET, POST, PUT, PATCH and DELETE requests the respective shortcut // functions can be used. @@ -91,57 +93,57 @@ func (group *RouterGroup) Handle(httpMethod, relativePath string, handlers ...Ha return group.handle(httpMethod, relativePath, handlers) } -// POST is a shortcut for router.Handle("POST", path, handle) +// POST is a shortcut for router.Handle("POST", path, handle). func (group *RouterGroup) POST(relativePath string, handlers ...HandlerFunc) IRoutes { - return group.handle("POST", relativePath, handlers) + return group.handle(http.MethodPost, relativePath, handlers) } -// GET is a shortcut for router.Handle("GET", path, handle) +// GET is a shortcut for router.Handle("GET", path, handle). func (group *RouterGroup) GET(relativePath string, handlers ...HandlerFunc) IRoutes { - return group.handle("GET", relativePath, handlers) + return group.handle(http.MethodGet, relativePath, handlers) } -// DELETE is a shortcut for router.Handle("DELETE", path, handle) +// DELETE is a shortcut for router.Handle("DELETE", path, handle). func (group *RouterGroup) DELETE(relativePath string, handlers ...HandlerFunc) IRoutes { - return group.handle("DELETE", relativePath, handlers) + return group.handle(http.MethodDelete, relativePath, handlers) } -// PATCH is a shortcut for router.Handle("PATCH", path, handle) +// PATCH is a shortcut for router.Handle("PATCH", path, handle). func (group *RouterGroup) PATCH(relativePath string, handlers ...HandlerFunc) IRoutes { - return group.handle("PATCH", relativePath, handlers) + return group.handle(http.MethodPatch, relativePath, handlers) } -// PUT is a shortcut for router.Handle("PUT", path, handle) +// PUT is a shortcut for router.Handle("PUT", path, handle). 
func (group *RouterGroup) PUT(relativePath string, handlers ...HandlerFunc) IRoutes { - return group.handle("PUT", relativePath, handlers) + return group.handle(http.MethodPut, relativePath, handlers) } -// OPTIONS is a shortcut for router.Handle("OPTIONS", path, handle) +// OPTIONS is a shortcut for router.Handle("OPTIONS", path, handle). func (group *RouterGroup) OPTIONS(relativePath string, handlers ...HandlerFunc) IRoutes { - return group.handle("OPTIONS", relativePath, handlers) + return group.handle(http.MethodOptions, relativePath, handlers) } -// HEAD is a shortcut for router.Handle("HEAD", path, handle) +// HEAD is a shortcut for router.Handle("HEAD", path, handle). func (group *RouterGroup) HEAD(relativePath string, handlers ...HandlerFunc) IRoutes { - return group.handle("HEAD", relativePath, handlers) + return group.handle(http.MethodHead, relativePath, handlers) } // Any registers a route that matches all the HTTP methods. -// GET, POST, PUT, PATCH, HEAD, OPTIONS, DELETE, CONNECT, TRACE +// GET, POST, PUT, PATCH, HEAD, OPTIONS, DELETE, CONNECT, TRACE. func (group *RouterGroup) Any(relativePath string, handlers ...HandlerFunc) IRoutes { - group.handle("GET", relativePath, handlers) - group.handle("POST", relativePath, handlers) - group.handle("PUT", relativePath, handlers) - group.handle("PATCH", relativePath, handlers) - group.handle("HEAD", relativePath, handlers) - group.handle("OPTIONS", relativePath, handlers) - group.handle("DELETE", relativePath, handlers) - group.handle("CONNECT", relativePath, handlers) - group.handle("TRACE", relativePath, handlers) + group.handle(http.MethodGet, relativePath, handlers) + group.handle(http.MethodPost, relativePath, handlers) + group.handle(http.MethodPut, relativePath, handlers) + group.handle(http.MethodPatch, relativePath, handlers) + group.handle(http.MethodHead, relativePath, handlers) + group.handle(http.MethodOptions, relativePath, handlers) + group.handle(http.MethodDelete, relativePath, handlers) + group.handle(http.MethodConnect, relativePath, handlers) + group.handle(http.MethodTrace, relativePath, handlers) return group.returnObj() } -// StaticFile registers a single route in order to server a single file of the local filesystem. +// StaticFile registers a single route in order to serve a single file of the local filesystem. 
// router.StaticFile("favicon.ico", "./resources/favicon.ico") func (group *RouterGroup) StaticFile(relativePath, filepath string) IRoutes { if strings.Contains(relativePath, ":") || strings.Contains(relativePath, "*") { @@ -183,11 +185,24 @@ func (group *RouterGroup) StaticFS(relativePath string, fs http.FileSystem) IRou func (group *RouterGroup) createStaticHandler(relativePath string, fs http.FileSystem) HandlerFunc { absolutePath := group.calculateAbsolutePath(relativePath) fileServer := http.StripPrefix(absolutePath, http.FileServer(fs)) - _, nolisting := fs.(*onlyfilesFS) + return func(c *Context) { - if nolisting { - c.Writer.WriteHeader(404) + if _, noListing := fs.(*onlyFilesFS); noListing { + c.Writer.WriteHeader(http.StatusNotFound) } + + file := c.Param("filepath") + // Check if file exists and/or if we have permission to access it + f, err := fs.Open(file) + if err != nil { + c.Writer.WriteHeader(http.StatusNotFound) + c.handlers = group.engine.noRoute + // Reset index + c.index = -1 + return + } + f.Close() + fileServer.ServeHTTP(c.Writer, c.Request) } } diff --git a/vendor/github.com/gin-gonic/gin/test_helpers.go b/vendor/github.com/gin-gonic/gin/test_helpers.go index e7dd55f77..3a7a5ddf6 100644 --- a/vendor/github.com/gin-gonic/gin/test_helpers.go +++ b/vendor/github.com/gin-gonic/gin/test_helpers.go @@ -4,10 +4,9 @@ package gin -import ( - "net/http" -) +import "net/http" +// CreateTestContext returns a fresh engine and context for testing purposes func CreateTestContext(w http.ResponseWriter) (c *Context, r *Engine) { r = New() c = r.allocateContext() diff --git a/vendor/github.com/gin-gonic/gin/tree.go b/vendor/github.com/gin-gonic/gin/tree.go index a39f43bf2..0d082d057 100644 --- a/vendor/github.com/gin-gonic/gin/tree.go +++ b/vendor/github.com/gin-gonic/gin/tree.go @@ -5,9 +5,18 @@ package gin import ( + "bytes" "net/url" "strings" "unicode" + "unicode/utf8" + + "github.com/gin-gonic/gin/internal/bytesconv" +) + +var ( + strColon = []byte(":") + strStar = []byte("*") ) // Param is a single URL parameter, consisting of a key and a value. 
@@ -62,18 +71,31 @@ func min(a, b int) int { return b } -func countParams(path string) uint8 { - var n uint - for i := 0; i < len(path); i++ { - if path[i] != ':' && path[i] != '*' { - continue - } - n++ +func longestCommonPrefix(a, b string) int { + i := 0 + max := min(len(a), len(b)) + for i < max && a[i] == b[i] { + i++ } - if n >= 255 { - return 255 + return i +} + +// addChild will add a child node, keeping wildcards at the end +func (n *node) addChild(child *node) { + if n.wildChild && len(n.children) > 0 { + wildcardChild := n.children[len(n.children)-1] + n.children = append(n.children[:len(n.children)-1], child, wildcardChild) + } else { + n.children = append(n.children, child) } - return uint8(n) +} + +func countParams(path string) uint16 { + var n uint16 + s := bytesconv.StringToBytes(path) + n += uint16(bytes.Count(s, strColon)) + n += uint16(bytes.Count(s, strStar)) + return n } type nodeType uint8 @@ -87,36 +109,38 @@ const ( type node struct { path string + indices string wildChild bool nType nodeType - maxParams uint8 - indices string - children []*node - handlers HandlersChain priority uint32 + children []*node // child nodes, at most 1 :param style node at the end of the array + handlers HandlersChain + fullPath string +} + +type skip struct { + path string + paramNode *node } -// increments priority of the given child and reorders if necessary +// Increments priority of the given child and reorders if necessary func (n *node) incrementChildPrio(pos int) int { - n.children[pos].priority++ - prio := n.children[pos].priority + cs := n.children + cs[pos].priority++ + prio := cs[pos].priority - // adjust position (move to front) + // Adjust position (move to front) newPos := pos - for newPos > 0 && n.children[newPos-1].priority < prio { - // swap node positions - tmpN := n.children[newPos-1] - n.children[newPos-1] = n.children[newPos] - n.children[newPos] = tmpN - - newPos-- + for ; newPos > 0 && cs[newPos-1].priority < prio; newPos-- { + // Swap node positions + cs[newPos-1], cs[newPos] = cs[newPos], cs[newPos-1] } - // build new index char string + // Build new index char string if newPos != pos { - n.indices = n.indices[:newPos] + // unchanged prefix, might be empty - n.indices[pos:pos+1] + // the index char we move - n.indices[newPos:pos] + n.indices[pos+1:] // rest without char at 'pos' + n.indices = n.indices[:newPos] + // Unchanged prefix, might be empty + n.indices[pos:pos+1] + // The index char we move + n.indices[newPos:pos] + n.indices[pos+1:] // Rest without char at 'pos' } return newPos @@ -127,236 +151,252 @@ func (n *node) incrementChildPrio(pos int) int { func (n *node) addRoute(path string, handlers HandlersChain) { fullPath := path n.priority++ - numParams := countParams(path) - - // non-empty tree - if len(n.path) > 0 || len(n.children) > 0 { - walk: - for { - // Update maxParams of the current node - if numParams > n.maxParams { - n.maxParams = numParams - } - // Find the longest common prefix. - // This also implies that the common prefix contains no ':' or '*' - // since the existing key can't contain those chars. 
- i := 0 - max := min(len(path), len(n.path)) - for i < max && path[i] == n.path[i] { - i++ - } - - // Split edge - if i < len(n.path) { - child := node{ - path: n.path[i:], - wildChild: n.wildChild, - indices: n.indices, - children: n.children, - handlers: n.handlers, - priority: n.priority - 1, - } + // Empty tree + if len(n.path) == 0 && len(n.children) == 0 { + n.insertChild(path, fullPath, handlers) + n.nType = root + return + } - // Update maxParams (max of all children) - for i := range child.children { - if child.children[i].maxParams > child.maxParams { - child.maxParams = child.children[i].maxParams - } - } + parentFullPathIndex := 0 - n.children = []*node{&child} - // []byte for proper unicode char conversion, see #65 - n.indices = string([]byte{n.path[i]}) - n.path = path[:i] - n.handlers = nil - n.wildChild = false +walk: + for { + // Find the longest common prefix. + // This also implies that the common prefix contains no ':' or '*' + // since the existing key can't contain those chars. + i := longestCommonPrefix(path, n.path) + + // Split edge + if i < len(n.path) { + child := node{ + path: n.path[i:], + wildChild: n.wildChild, + indices: n.indices, + children: n.children, + handlers: n.handlers, + priority: n.priority - 1, + fullPath: n.fullPath, } - // Make new node a child of this node - if i < len(path) { - path = path[i:] - - if n.wildChild { - n = n.children[0] - n.priority++ - - // Update maxParams of the child node - if numParams > n.maxParams { - n.maxParams = numParams - } - numParams-- - - // Check if the wildcard matches - if len(path) >= len(n.path) && n.path == path[:len(n.path)] { - // check for longer wildcard, e.g. :name and :names - if len(n.path) >= len(path) || path[len(n.path)] == '/' { - continue walk - } - } + n.children = []*node{&child} + // []byte for proper unicode char conversion, see #65 + n.indices = bytesconv.BytesToString([]byte{n.path[i]}) + n.path = path[:i] + n.handlers = nil + n.wildChild = false + n.fullPath = fullPath[:parentFullPathIndex+i] + } - panic("path segment '" + path + - "' conflicts with existing wildcard '" + n.path + - "' in path '" + fullPath + "'") - } + // Make new node a child of this node + if i < len(path) { + path = path[i:] + c := path[0] - c := path[0] + // '/' after param + if n.nType == param && c == '/' && len(n.children) == 1 { + parentFullPathIndex += len(n.path) + n = n.children[0] + n.priority++ + continue walk + } - // slash after param - if n.nType == param && c == '/' && len(n.children) == 1 { - n = n.children[0] - n.priority++ + // Check if a child with the next path byte exists + for i, max := 0, len(n.indices); i < max; i++ { + if c == n.indices[i] { + parentFullPathIndex += len(n.path) + i = n.incrementChildPrio(i) + n = n.children[i] continue walk } + } - // Check if a child with the next path byte exists - for i := 0; i < len(n.indices); i++ { - if c == n.indices[i] { - i = n.incrementChildPrio(i) - n = n.children[i] - continue walk - } + // Otherwise insert it + if c != ':' && c != '*' && n.nType != catchAll { + // []byte for proper unicode char conversion, see #65 + n.indices += bytesconv.BytesToString([]byte{c}) + child := &node{ + fullPath: fullPath, } - - // Otherwise insert it - if c != ':' && c != '*' { - // []byte for proper unicode char conversion, see #65 - n.indices += string([]byte{c}) - child := &node{ - maxParams: numParams, - } - n.children = append(n.children, child) - n.incrementChildPrio(len(n.indices) - 1) - n = child + n.addChild(child) + n.incrementChildPrio(len(n.indices) - 1) + 
n = child + } else if n.wildChild { + // inserting a wildcard node, need to check if it conflicts with the existing wildcard + n = n.children[len(n.children)-1] + n.priority++ + + // Check if the wildcard matches + if len(path) >= len(n.path) && n.path == path[:len(n.path)] && + // Adding a child to a catchAll is not possible + n.nType != catchAll && + // Check for longer wildcard, e.g. :name and :names + (len(n.path) >= len(path) || path[len(n.path)] == '/') { + continue walk } - n.insertChild(numParams, path, fullPath, handlers) - return - } else if i == len(path) { // Make node a (in-path) leaf - if n.handlers != nil { - panic("handlers are already registered for path ''" + fullPath + "'") + // Wildcard conflict + pathSeg := path + if n.nType != catchAll { + pathSeg = strings.SplitN(pathSeg, "/", 2)[0] } - n.handlers = handlers + prefix := fullPath[:strings.Index(fullPath, pathSeg)] + n.path + panic("'" + pathSeg + + "' in new path '" + fullPath + + "' conflicts with existing wildcard '" + n.path + + "' in existing prefix '" + prefix + + "'") } + + n.insertChild(path, fullPath, handlers) return } - } else { // Empty tree - n.insertChild(numParams, path, fullPath, handlers) - n.nType = root + + // Otherwise add handle to current node + if n.handlers != nil { + panic("handlers are already registered for path '" + fullPath + "'") + } + n.handlers = handlers + n.fullPath = fullPath + return } } -func (n *node) insertChild(numParams uint8, path string, fullPath string, handlers HandlersChain) { - var offset int // already handled bytes of the path - - // find prefix until first wildcard (beginning with ':'' or '*'') - for i, max := 0, len(path); numParams > 0; i++ { - c := path[i] +// Search for a wildcard segment and check the name for invalid characters. +// Returns -1 as index, if no wildcard was found. 
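The wildcard parsing that findWildcard and addChild implement corresponds to the ':param' and '*catchall' patterns users register: one wildcard per path segment, catch-alls only at the end. A reminder of what those patterns look like from the routing API (paths and handlers are illustrative):

```go
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

func main() {
	r := gin.Default()

	// ":id" becomes a param node in the tree; at most one wildcard per segment.
	r.GET("/users/:id", func(c *gin.Context) {
		c.String(http.StatusOK, "user %s", c.Param("id"))
	})

	// "*filepath" is a catch-all node and must sit at the end of the path.
	r.GET("/static/*filepath", func(c *gin.Context) {
		c.String(http.StatusOK, "file %s", c.Param("filepath"))
	})

	r.Run(":8080")
}
```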
+func findWildcard(path string) (wildcard string, i int, valid bool) { + // Find start + for start, c := range []byte(path) { + // A wildcard starts with ':' (param) or '*' (catch-all) if c != ':' && c != '*' { continue } - // find wildcard end (either '/' or path end) - end := i + 1 - for end < max && path[end] != '/' { - switch path[end] { - // the wildcard name must not contain ':' and '*' + // Find end and check for invalid characters + valid = true + for end, c := range []byte(path[start+1:]) { + switch c { + case '/': + return path[start : start+1+end], start, valid case ':', '*': - panic("only one wildcard per path segment is allowed, has: '" + - path[i:] + "' in path '" + fullPath + "'") - default: - end++ + valid = false } } + return path[start:], start, valid + } + return "", -1, false +} + +func (n *node) insertChild(path string, fullPath string, handlers HandlersChain) { + for { + // Find prefix until first wildcard + wildcard, i, valid := findWildcard(path) + if i < 0 { // No wildcard found + break + } - // check if this Node existing children which would be - // unreachable if we insert the wildcard here - if len(n.children) > 0 { - panic("wildcard route '" + path[i:end] + - "' conflicts with existing children in path '" + fullPath + "'") + // The wildcard name must not contain ':' and '*' + if !valid { + panic("only one wildcard per path segment is allowed, has: '" + + wildcard + "' in path '" + fullPath + "'") } // check if the wildcard has a name - if end-i < 2 { + if len(wildcard) < 2 { panic("wildcards must be named with a non-empty name in path '" + fullPath + "'") } - if c == ':' { // param - // split path at the beginning of the wildcard + if wildcard[0] == ':' { // param if i > 0 { - n.path = path[offset:i] - offset = i + // Insert prefix before the current wildcard + n.path = path[:i] + path = path[i:] } child := &node{ - nType: param, - maxParams: numParams, + nType: param, + path: wildcard, + fullPath: fullPath, } - n.children = []*node{child} + n.addChild(child) n.wildChild = true n = child n.priority++ - numParams-- // if the path doesn't end with the wildcard, then there // will be another non-wildcard subpath starting with '/' - if end < max { - n.path = path[offset:end] - offset = end + if len(wildcard) < len(path) { + path = path[len(wildcard):] child := &node{ - maxParams: numParams, - priority: 1, + priority: 1, + fullPath: fullPath, } - n.children = []*node{child} + n.addChild(child) n = child + continue } - } else { // catchAll - if end != max || numParams > 1 { - panic("catch-all routes are only allowed at the end of the path in path '" + fullPath + "'") - } + // Otherwise we're done. 
Insert the handle in the new leaf + n.handlers = handlers + return + } - if len(n.path) > 0 && n.path[len(n.path)-1] == '/' { - panic("catch-all conflicts with existing handle for the path segment root in path '" + fullPath + "'") - } + // catchAll + if i+len(wildcard) != len(path) { + panic("catch-all routes are only allowed at the end of the path in path '" + fullPath + "'") + } - // currently fixed width 1 for '/' - i-- - if path[i] != '/' { - panic("no / before catch-all in path '" + fullPath + "'") - } + if len(n.path) > 0 && n.path[len(n.path)-1] == '/' { + panic("catch-all conflicts with existing handle for the path segment root in path '" + fullPath + "'") + } - n.path = path[offset:i] + // currently fixed width 1 for '/' + i-- + if path[i] != '/' { + panic("no / before catch-all in path '" + fullPath + "'") + } - // first node: catchAll node with empty path - child := &node{ - wildChild: true, - nType: catchAll, - maxParams: 1, - } - n.children = []*node{child} - n.indices = string(path[i]) - n = child - n.priority++ + n.path = path[:i] - // second node: node holding the variable - child = &node{ - path: path[i:], - nType: catchAll, - maxParams: 1, - handlers: handlers, - priority: 1, - } - n.children = []*node{child} + // First node: catchAll node with empty path + child := &node{ + wildChild: true, + nType: catchAll, + fullPath: fullPath, + } - return + n.addChild(child) + n.indices = string('/') + n = child + n.priority++ + + // second node: node holding the variable + child = &node{ + path: path[i:], + nType: catchAll, + handlers: handlers, + priority: 1, + fullPath: fullPath, } + n.children = []*node{child} + + return } - // insert remaining path part and handle to the leaf - n.path = path[offset:] + // If no wildcard was found, simply insert the path and handle + n.path = path n.handlers = handlers + n.fullPath = fullPath +} + +// nodeValue holds return values of (*Node).getValue method +type nodeValue struct { + handlers HandlersChain + params *Params + tsr bool + fullPath string } // Returns the handle registered with the given path (key). The values of @@ -364,57 +404,78 @@ func (n *node) insertChild(numParams uint8, path string, fullPath string, handle // If no handle can be found, a TSR (trailing slash redirect) recommendation is // made if a handle exists with an extra (without the) trailing slash for the // given path. 
-func (n *node) getValue(path string, po Params, unescape bool) (handlers HandlersChain, p Params, tsr bool) { - p = po +func (n *node) getValue(path string, params *Params, unescape bool) (value nodeValue) { + var skipped *skip + walk: // Outer loop for walking the tree for { - if len(path) > len(n.path) { - if path[:len(n.path)] == n.path { - path = path[len(n.path):] - // If this node does not have a wildcard (param or catchAll) - // child, we can just look up the next child node and continue - // to walk down the tree - if !n.wildChild { - c := path[0] - for i := 0; i < len(n.indices); i++ { - if c == n.indices[i] { - n = n.children[i] - continue walk + prefix := n.path + if len(path) > len(prefix) { + if path[:len(prefix)] == prefix { + path = path[len(prefix):] + + // Try all the non-wildcard children first by matching the indices + idxc := path[0] + for i, c := range []byte(n.indices) { + if c == idxc { + if strings.HasPrefix(n.children[len(n.children)-1].path, ":") { + skipped = &skip{ + path: prefix + path, + paramNode: &node{ + path: n.path, + wildChild: n.wildChild, + nType: n.nType, + priority: n.priority, + children: n.children, + handlers: n.handlers, + fullPath: n.fullPath, + }, + } } + + n = n.children[i] + continue walk } + } + // If there is no wildcard pattern, recommend a redirection + if !n.wildChild { // Nothing found. // We can recommend to redirect to the same URL without a // trailing slash if a leaf exists for that path. - tsr = (path == "/" && n.handlers != nil) + value.tsr = (path == "/" && n.handlers != nil) return } - // handle wildcard child - n = n.children[0] + // Handle wildcard child, which is always at the end of the array + n = n.children[len(n.children)-1] + switch n.nType { case param: - // find param end (either '/' or path end) + // Find param end (either '/' or path end) end := 0 for end < len(path) && path[end] != '/' { end++ } - // save param value - if cap(p) < int(n.maxParams) { - p = make(Params, 0, n.maxParams) - } - i := len(p) - p = p[:i+1] // expand slice within preallocated capacity - p[i].Key = n.path[1:] - val := path[:end] - if unescape { - var err error - if p[i].Value, err = url.QueryUnescape(val); err != nil { - p[i].Value = val // fallback, in case of error + // Save param value + if params != nil { + if value.params == nil { + value.params = params + } + // Expand slice within preallocated capacity + i := len(*value.params) + *value.params = (*value.params)[:i+1] + val := path[:end] + if unescape { + if v, err := url.QueryUnescape(val); err == nil { + val = v + } + } + (*value.params)[i] = Param{ + Key: n.path[1:], + Value: val, } - } else { - p[i].Value = val } // we need to go deeper! @@ -426,64 +487,75 @@ walk: // Outer loop for walking the tree } // ... but we can't - tsr = (len(path) == end+1) + value.tsr = (len(path) == end+1) return } - if handlers = n.handlers; handlers != nil { + if value.handlers = n.handlers; value.handlers != nil { + value.fullPath = n.fullPath return } if len(n.children) == 1 { // No handle found. 
Check if a handle for this path + a // trailing slash exists for TSR recommendation n = n.children[0] - tsr = (n.path == "/" && n.handlers != nil) + value.tsr = (n.path == "/" && n.handlers != nil) } - return case catchAll: - // save param value - if cap(p) < int(n.maxParams) { - p = make(Params, 0, n.maxParams) - } - i := len(p) - p = p[:i+1] // expand slice within preallocated capacity - p[i].Key = n.path[2:] - if unescape { - var err error - if p[i].Value, err = url.QueryUnescape(path); err != nil { - p[i].Value = path // fallback, in case of error + // Save param value + if params != nil { + if value.params == nil { + value.params = params + } + // Expand slice within preallocated capacity + i := len(*value.params) + *value.params = (*value.params)[:i+1] + val := path + if unescape { + if v, err := url.QueryUnescape(path); err == nil { + val = v + } + } + (*value.params)[i] = Param{ + Key: n.path[2:], + Value: val, } - } else { - p[i].Value = path } - handlers = n.handlers + value.handlers = n.handlers + value.fullPath = n.fullPath return default: panic("invalid node type") } } - } else if path == n.path { + } + + if path == prefix { // We should have reached the node containing the handle. // Check if this node has a handle registered. - if handlers = n.handlers; handlers != nil { + if value.handlers = n.handlers; value.handlers != nil { + value.fullPath = n.fullPath return } + // If there is no handle for this route, but this route has a + // wildcard child, there must be a handle for this path with an + // additional trailing slash if path == "/" && n.wildChild && n.nType != root { - tsr = true + value.tsr = true return } // No handle found. Check if a handle for this path + a // trailing slash exists for trailing slash recommendation - for i := 0; i < len(n.indices); i++ { - if n.indices[i] == '/' { + for i, c := range []byte(n.indices) { + if c == '/' { n = n.children[i] - tsr = (len(n.path) == 1 && n.handlers != nil) || + value.tsr = (len(n.path) == 1 && n.handlers != nil) || (n.nType == catchAll && n.children[0].handlers != nil) return } @@ -492,11 +564,18 @@ walk: // Outer loop for walking the tree return } + if path != "/" && skipped != nil && strings.HasSuffix(skipped.path, path) { + path = skipped.path + n = skipped.paramNode + skipped = nil + continue walk + } + // Nothing found. We can recommend to redirect to the same URL with an // extra trailing slash if a leaf exists for that path - tsr = (path == "/") || - (len(n.path) == len(path)+1 && n.path[len(path)] == '/' && - path == n.path[:len(n.path)-1] && n.handlers != nil) + value.tsr = (path == "/") || + (len(prefix) == len(path)+1 && prefix[len(path)] == '/' && + path == prefix[:len(prefix)-1] && n.handlers != nil) return } } @@ -505,104 +584,210 @@ walk: // Outer loop for walking the tree // It can optionally also fix trailing slashes. // It returns the case-corrected path and a bool indicating whether the lookup // was successful. -func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) (ciPath []byte, found bool) { - ciPath = make([]byte, 0, len(path)+1) // preallocate enough memory +func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) ([]byte, bool) { + const stackBufSize = 128 + + // Use a static sized buffer on the stack in the common case. + // If the path is too long, allocate a buffer on the heap instead. 
+ buf := make([]byte, 0, stackBufSize) + if length := len(path) + 1; length > stackBufSize { + buf = make([]byte, 0, length) + } - // Outer loop for walking the tree - for len(path) >= len(n.path) && strings.ToLower(path[:len(n.path)]) == strings.ToLower(n.path) { - path = path[len(n.path):] + ciPath := n.findCaseInsensitivePathRec( + path, + buf, // Preallocate enough memory for new path + [4]byte{}, // Empty rune buffer + fixTrailingSlash, + ) + + return ciPath, ciPath != nil +} + +// Shift bytes in array by n bytes left +func shiftNRuneBytes(rb [4]byte, n int) [4]byte { + switch n { + case 0: + return rb + case 1: + return [4]byte{rb[1], rb[2], rb[3], 0} + case 2: + return [4]byte{rb[2], rb[3]} + case 3: + return [4]byte{rb[3]} + default: + return [4]byte{} + } +} + +// Recursive case-insensitive lookup function used by n.findCaseInsensitivePath +func (n *node) findCaseInsensitivePathRec(path string, ciPath []byte, rb [4]byte, fixTrailingSlash bool) []byte { + npLen := len(n.path) + +walk: // Outer loop for walking the tree + for len(path) >= npLen && (npLen == 0 || strings.EqualFold(path[1:npLen], n.path[1:])) { + // Add common prefix to result + oldPath := path + path = path[npLen:] ciPath = append(ciPath, n.path...) - if len(path) > 0 { - // If this node does not have a wildcard (param or catchAll) child, - // we can just look up the next child node and continue to walk down - // the tree - if !n.wildChild { - r := unicode.ToLower(rune(path[0])) - for i, index := range n.indices { - // must use recursive approach since both index and - // ToLower(index) could exist. We must check both. - if r == unicode.ToLower(index) { - out, found := n.children[i].findCaseInsensitivePath(path, fixTrailingSlash) - if found { - return append(ciPath, out...), true + if len(path) == 0 { + // We should have reached the node containing the handle. + // Check if this node has a handle registered. + if n.handlers != nil { + return ciPath + } + + // No handle found. + // Try to fix the path by adding a trailing slash + if fixTrailingSlash { + for i, c := range []byte(n.indices) { + if c == '/' { + n = n.children[i] + if (len(n.path) == 1 && n.handlers != nil) || + (n.nType == catchAll && n.children[0].handlers != nil) { + return append(ciPath, '/') } + return nil } } - - // Nothing found. We can recommend to redirect to the same URL - // without a trailing slash if a leaf exists for that path - found = (fixTrailingSlash && path == "/" && n.handlers != nil) - return } + return nil + } - n = n.children[0] - switch n.nType { - case param: - // find param end (either '/' or path end) - k := 0 - for k < len(path) && path[k] != '/' { - k++ + // If this node does not have a wildcard (param or catchAll) child, + // we can just look up the next child node and continue to walk down + // the tree + if !n.wildChild { + // Skip rune bytes already processed + rb = shiftNRuneBytes(rb, npLen) + + if rb[0] != 0 { + // Old rune not finished + idxc := rb[0] + for i, c := range []byte(n.indices) { + if c == idxc { + // continue with child node + n = n.children[i] + npLen = len(n.path) + continue walk + } } - - // add param value to case insensitive path - ciPath = append(ciPath, path[:k]...) - - // we need to go deeper! - if k < len(path) { - if len(n.children) > 0 { - path = path[k:] - n = n.children[0] - continue + } else { + // Process a new rune + var rv rune + + // Find rune start. + // Runes are up to 4 byte long, + // -4 would definitely be another rune. 
+ var off int + for max := min(npLen, 3); off < max; off++ { + if i := npLen - off; utf8.RuneStart(oldPath[i]) { + // read rune from cached path + rv, _ = utf8.DecodeRuneInString(oldPath[i:]) + break } + } - // ... but we can't - if fixTrailingSlash && len(path) == k+1 { - return ciPath, true + // Calculate lowercase bytes of current rune + lo := unicode.ToLower(rv) + utf8.EncodeRune(rb[:], lo) + + // Skip already processed bytes + rb = shiftNRuneBytes(rb, off) + + idxc := rb[0] + for i, c := range []byte(n.indices) { + // Lowercase matches + if c == idxc { + // must use a recursive approach since both the + // uppercase byte and the lowercase byte might exist + // as an index + if out := n.children[i].findCaseInsensitivePathRec( + path, ciPath, rb, fixTrailingSlash, + ); out != nil { + return out + } + break } - return } - if n.handlers != nil { - return ciPath, true - } else if fixTrailingSlash && len(n.children) == 1 { - // No handle found. Check if a handle for this path + a - // trailing slash exists - n = n.children[0] - if n.path == "/" && n.handlers != nil { - return append(ciPath, '/'), true + // If we found no match, the same for the uppercase rune, + // if it differs + if up := unicode.ToUpper(rv); up != lo { + utf8.EncodeRune(rb[:], up) + rb = shiftNRuneBytes(rb, off) + + idxc := rb[0] + for i, c := range []byte(n.indices) { + // Uppercase matches + if c == idxc { + // Continue with child node + n = n.children[i] + npLen = len(n.path) + continue walk + } } } - return + } - case catchAll: - return append(ciPath, path...), true + // Nothing found. We can recommend to redirect to the same URL + // without a trailing slash if a leaf exists for that path + if fixTrailingSlash && path == "/" && n.handlers != nil { + return ciPath + } + return nil + } - default: - panic("invalid node type") + n = n.children[0] + switch n.nType { + case param: + // Find param end (either '/' or path end) + end := 0 + for end < len(path) && path[end] != '/' { + end++ } - } else { - // We should have reached the node containing the handle. - // Check if this node has a handle registered. + + // Add param value to case insensitive path + ciPath = append(ciPath, path[:end]...) + + // We need to go deeper! + if end < len(path) { + if len(n.children) > 0 { + // Continue with child node + n = n.children[0] + npLen = len(n.path) + path = path[end:] + continue + } + + // ... but we can't + if fixTrailingSlash && len(path) == end+1 { + return ciPath + } + return nil + } + if n.handlers != nil { - return ciPath, true + return ciPath } - // No handle found. - // Try to fix the path by adding a trailing slash - if fixTrailingSlash { - for i := 0; i < len(n.indices); i++ { - if n.indices[i] == '/' { - n = n.children[i] - if (len(n.path) == 1 && n.handlers != nil) || - (n.nType == catchAll && n.children[0].handlers != nil) { - return append(ciPath, '/'), true - } - return - } + if fixTrailingSlash && len(n.children) == 1 { + // No handle found. Check if a handle for this path + a + // trailing slash exists + n = n.children[0] + if n.path == "/" && n.handlers != nil { + return append(ciPath, '/') } } - return + + return nil + + case catchAll: + return append(ciPath, path...) 
+ + default: + panic("invalid node type") } } @@ -610,13 +795,12 @@ func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) (ciPa // Try to fix the path by adding / removing a trailing slash if fixTrailingSlash { if path == "/" { - return ciPath, true + return ciPath } - if len(path)+1 == len(n.path) && n.path[len(path)] == '/' && - strings.ToLower(path) == strings.ToLower(n.path[:len(path)]) && - n.handlers != nil { - return append(ciPath, n.path...), true + if len(path)+1 == npLen && n.path[len(path)] == '/' && + strings.EqualFold(path[1:], n.path[1:len(path)]) && n.handlers != nil { + return append(ciPath, n.path...) } } - return + return nil } diff --git a/vendor/github.com/gin-gonic/gin/utils.go b/vendor/github.com/gin-gonic/gin/utils.go index 18064fb53..c32f0eeb0 100644 --- a/vendor/github.com/gin-gonic/gin/utils.go +++ b/vendor/github.com/gin-gonic/gin/utils.go @@ -14,8 +14,10 @@ import ( "strings" ) +// BindKey indicates a default bind key. const BindKey = "_gin-gonic/gin/bindkey" +// Bind is a helper function for given interface object and returns a Gin middleware. func Bind(val interface{}) HandlerFunc { value := reflect.ValueOf(val) if value.Kind() == reflect.Ptr { @@ -33,21 +35,24 @@ func Bind(val interface{}) HandlerFunc { } } +// WrapF is a helper function for wrapping http.HandlerFunc and returns a Gin middleware. func WrapF(f http.HandlerFunc) HandlerFunc { return func(c *Context) { f(c.Writer, c.Request) } } +// WrapH is a helper function for wrapping http.Handler and returns a Gin middleware. func WrapH(h http.Handler) HandlerFunc { return func(c *Context) { h.ServeHTTP(c.Writer, c.Request) } } +// H is a shortcut for map[string]interface{} type H map[string]interface{} -// MarshalXML allows type H to be used with xml.Marshal +// MarshalXML allows type H to be used with xml.Marshal. 
func (h H) MarshalXML(e *xml.Encoder, start xml.StartElement) error { start.Name = xml.Name{ Space: "", @@ -65,10 +70,8 @@ func (h H) MarshalXML(e *xml.Encoder, start xml.StartElement) error { return err } } - if err := e.EncodeToken(xml.EndElement{Name: start.Name}); err != nil { - return err - } - return nil + + return e.EncodeToken(xml.EndElement{Name: start.Name}) } func assert1(guard bool, text string) { @@ -87,25 +90,23 @@ func filterFlags(content string) string { } func chooseData(custom, wildcard interface{}) interface{} { - if custom == nil { - if wildcard == nil { - panic("negotiation config is invalid") - } + if custom != nil { + return custom + } + if wildcard != nil { return wildcard } - return custom + panic("negotiation config is invalid") } func parseAccept(acceptHeader string) []string { parts := strings.Split(acceptHeader, ",") out := make([]string, 0, len(parts)) for _, part := range parts { - index := strings.IndexByte(part, ';') - if index >= 0 { - part = part[0:index] + if i := strings.IndexByte(part, ';'); i > 0 { + part = part[:i] } - part = strings.TrimSpace(part) - if len(part) > 0 { + if part = strings.TrimSpace(part); part != "" { out = append(out, part) } } @@ -113,11 +114,10 @@ func parseAccept(acceptHeader string) []string { } func lastChar(str string) uint8 { - size := len(str) - if size == 0 { + if str == "" { panic("The length of the string can't be 0") } - return str[size-1] + return str[len(str)-1] } func nameOfFunction(f interface{}) string { @@ -125,13 +125,12 @@ func nameOfFunction(f interface{}) string { } func joinPaths(absolutePath, relativePath string) string { - if len(relativePath) == 0 { + if relativePath == "" { return absolutePath } finalPath := path.Join(absolutePath, relativePath) - appendSlash := lastChar(relativePath) == '/' && lastChar(finalPath) != '/' - if appendSlash { + if lastChar(relativePath) == '/' && lastChar(finalPath) != '/' { return finalPath + "/" } return finalPath @@ -140,7 +139,7 @@ func joinPaths(absolutePath, relativePath string) string { func resolveAddress(addr []string) string { switch len(addr) { case 0: - if port := os.Getenv("PORT"); len(port) > 0 { + if port := os.Getenv("PORT"); port != "" { debugPrint("Environment variable PORT=\"%s\"", port) return ":" + port } @@ -149,6 +148,6 @@ func resolveAddress(addr []string) string { case 1: return addr[0] default: - panic("too much parameters") + panic("too many parameters") } } diff --git a/vendor/github.com/gin-gonic/gin/version.go b/vendor/github.com/gin-gonic/gin/version.go new file mode 100644 index 000000000..a80ab69a8 --- /dev/null +++ b/vendor/github.com/gin-gonic/gin/version.go @@ -0,0 +1,8 @@ +// Copyright 2018 Gin Core Team. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +// Version is the current gin framework's version. 
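The WrapF and WrapH helpers documented in utils.go above are the usual bridge from plain net/http handlers into gin middleware. A short sketch with illustrative routes, separate from the patch:

```go
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

func main() {
	r := gin.Default()

	// WrapF adapts a plain http.HandlerFunc into a gin HandlerFunc.
	r.GET("/healthz", gin.WrapF(func(w http.ResponseWriter, req *http.Request) {
		w.WriteHeader(http.StatusNoContent)
	}))

	// WrapH does the same for any http.Handler, e.g. an existing ServeMux.
	r.GET("/debug/*any", gin.WrapH(http.DefaultServeMux))

	r.Run(":8080")
}
```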
+const Version = "v1.7.2" diff --git a/vendor/github.com/gin-gonic/gin/wercker.yml b/vendor/github.com/gin-gonic/gin/wercker.yml deleted file mode 100644 index 3ab8084cc..000000000 --- a/vendor/github.com/gin-gonic/gin/wercker.yml +++ /dev/null @@ -1 +0,0 @@ -box: wercker/default \ No newline at end of file diff --git a/vendor/github.com/go-playground/locales/.gitignore b/vendor/github.com/go-playground/locales/.gitignore new file mode 100644 index 000000000..daf913b1b --- /dev/null +++ b/vendor/github.com/go-playground/locales/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/go-playground/locales/.travis.yml b/vendor/github.com/go-playground/locales/.travis.yml new file mode 100644 index 000000000..d50237a60 --- /dev/null +++ b/vendor/github.com/go-playground/locales/.travis.yml @@ -0,0 +1,26 @@ +language: go +go: + - 1.13.1 + - tip +matrix: + allow_failures: + - go: tip + +notifications: + email: + recipients: dean.karn@gmail.com + on_success: change + on_failure: always + +before_install: + - go install github.com/mattn/goveralls + +# Only clone the most recent commit. +git: + depth: 1 + +script: + - go test -v -race -covermode=atomic -coverprofile=coverage.coverprofile ./... + +after_success: | + goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN \ No newline at end of file diff --git a/vendor/github.com/go-playground/locales/LICENSE b/vendor/github.com/go-playground/locales/LICENSE new file mode 100644 index 000000000..75854ac4f --- /dev/null +++ b/vendor/github.com/go-playground/locales/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Go Playground + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/go-playground/locales/README.md b/vendor/github.com/go-playground/locales/README.md new file mode 100644 index 000000000..ba1b0680c --- /dev/null +++ b/vendor/github.com/go-playground/locales/README.md @@ -0,0 +1,172 @@ +## locales +![Project status](https://img.shields.io/badge/version-0.13.0-green.svg) +[![Build Status](https://travis-ci.org/go-playground/locales.svg?branch=master)](https://travis-ci.org/go-playground/locales) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/locales)](https://goreportcard.com/report/github.com/go-playground/locales) +[![GoDoc](https://godoc.org/github.com/go-playground/locales?status.svg)](https://godoc.org/github.com/go-playground/locales) +![License](https://img.shields.io/dub/l/vibe-d.svg) +[![Gitter](https://badges.gitter.im/go-playground/locales.svg)](https://gitter.im/go-playground/locales?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) + +Locales is a set of locales generated from the [Unicode CLDR Project](http://cldr.unicode.org/) which can be used independently or within +an i18n package; these were built for use with, but not exclusive to, [Universal Translator](https://github.com/go-playground/universal-translator). + +Features +-------- +- [x] Rules generated from the latest [CLDR](http://cldr.unicode.org/index/downloads) data, v31.0.1 +- [x] Contains Cardinal, Ordinal and Range Plural Rules +- [x] Contains Month, Weekday and Timezone translations built in +- [x] Contains Date & Time formatting functions +- [x] Contains Number, Currency, Accounting and Percent formatting functions +- [x] Supports the "Gregorian" calendar only ( my time isn't unlimited, had to draw the line somewhere ) + +Full Tests +-------------------- +I could sure use your help adding tests for every locale, it is a huge undertaking and I just don't have the free time to do it all at the moment; +any help would be **greatly appreciated!!!!** please see [issue](https://github.com/go-playground/locales/issues/1) for details. + +Installation +----------- + +Use go get + +```shell +go get github.com/go-playground/locales +``` + +NOTES +-------- +You'll notice most return types are []byte, this is because most of the time the results will be concatenated with a larger body +of text and can avoid some allocations if already appending to a byte array, otherwise just cast as string. + +Usage +------- +```go +package main + +import ( + "fmt" + "time" + + "github.com/go-playground/locales/currency" + "github.com/go-playground/locales/en_CA" +) + +func main() { + + loc, _ := time.LoadLocation("America/Toronto") + datetime := time.Date(2016, 02, 03, 9, 0, 1, 0, loc) + + l := en_CA.New() + + // Dates + fmt.Println(l.FmtDateFull(datetime)) + fmt.Println(l.FmtDateLong(datetime)) + fmt.Println(l.FmtDateMedium(datetime)) + fmt.Println(l.FmtDateShort(datetime)) + + // Times + fmt.Println(l.FmtTimeFull(datetime)) + fmt.Println(l.FmtTimeLong(datetime)) + fmt.Println(l.FmtTimeMedium(datetime)) + fmt.Println(l.FmtTimeShort(datetime)) + + // Months Wide + fmt.Println(l.MonthWide(time.January)) + fmt.Println(l.MonthWide(time.February)) + fmt.Println(l.MonthWide(time.March)) + // ... + + // Months Abbreviated + fmt.Println(l.MonthAbbreviated(time.January)) + fmt.Println(l.MonthAbbreviated(time.February)) + fmt.Println(l.MonthAbbreviated(time.March)) + // ... 
+ + // Months Narrow + fmt.Println(l.MonthNarrow(time.January)) + fmt.Println(l.MonthNarrow(time.February)) + fmt.Println(l.MonthNarrow(time.March)) + // ... + + // Weekdays Wide + fmt.Println(l.WeekdayWide(time.Sunday)) + fmt.Println(l.WeekdayWide(time.Monday)) + fmt.Println(l.WeekdayWide(time.Tuesday)) + // ... + + // Weekdays Abbreviated + fmt.Println(l.WeekdayAbbreviated(time.Sunday)) + fmt.Println(l.WeekdayAbbreviated(time.Monday)) + fmt.Println(l.WeekdayAbbreviated(time.Tuesday)) + // ... + + // Weekdays Short + fmt.Println(l.WeekdayShort(time.Sunday)) + fmt.Println(l.WeekdayShort(time.Monday)) + fmt.Println(l.WeekdayShort(time.Tuesday)) + // ... + + // Weekdays Narrow + fmt.Println(l.WeekdayNarrow(time.Sunday)) + fmt.Println(l.WeekdayNarrow(time.Monday)) + fmt.Println(l.WeekdayNarrow(time.Tuesday)) + // ... + + var f64 float64 + + f64 = -10356.4523 + + // Number + fmt.Println(l.FmtNumber(f64, 2)) + + // Currency + fmt.Println(l.FmtCurrency(f64, 2, currency.CAD)) + fmt.Println(l.FmtCurrency(f64, 2, currency.USD)) + + // Accounting + fmt.Println(l.FmtAccounting(f64, 2, currency.CAD)) + fmt.Println(l.FmtAccounting(f64, 2, currency.USD)) + + f64 = 78.12 + + // Percent + fmt.Println(l.FmtPercent(f64, 0)) + + // Plural Rules for locale, so you know what rules you must cover + fmt.Println(l.PluralsCardinal()) + fmt.Println(l.PluralsOrdinal()) + + // Cardinal Plural Rules + fmt.Println(l.CardinalPluralRule(1, 0)) + fmt.Println(l.CardinalPluralRule(1.0, 0)) + fmt.Println(l.CardinalPluralRule(1.0, 1)) + fmt.Println(l.CardinalPluralRule(3, 0)) + + // Ordinal Plural Rules + fmt.Println(l.OrdinalPluralRule(21, 0)) // 21st + fmt.Println(l.OrdinalPluralRule(22, 0)) // 22nd + fmt.Println(l.OrdinalPluralRule(33, 0)) // 33rd + fmt.Println(l.OrdinalPluralRule(34, 0)) // 34th + + // Range Plural Rules + fmt.Println(l.RangePluralRule(1, 0, 1, 0)) // 1-1 + fmt.Println(l.RangePluralRule(1, 0, 2, 0)) // 1-2 + fmt.Println(l.RangePluralRule(5, 0, 8, 0)) // 5-8 +} +``` + +NOTES: +------- +These rules were generated from the [Unicode CLDR Project](http://cldr.unicode.org/), if you encounter any issues +I strongly encourage contributing to the CLDR project to get the locale information corrected and the next time +these locales are regenerated the fix will come with. + +I do however realize that time constraints are often important and so there are two options: + +1. Create your own locale, copy, paste and modify, and ensure it complies with the `Translator` interface. +2. Add an exception in the locale generation code directly and once regenerated, fix will be in place. + +Please to not make fixes inside the locale files, they WILL get overwritten when the locales are regenerated. + +License +------ +Distributed under MIT License, please see license file in code for more details. 
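A note on the `CardinalPluralRule` calls in the usage example above: the second argument is the digits/precision (`v`), and it changes which CLDR rule is selected. Below is a minimal, hedged sketch reusing the `en_CA` package from that example; the output comments reflect CLDR English cardinal rules and are illustrative, not authoritative.

```go
package main

import (
	"fmt"

	"github.com/go-playground/locales/en_CA"
)

func main() {
	l := en_CA.New()

	// CLDR English applies the "one" cardinal rule only to the integer 1 with
	// zero visible fraction digits; "1.0" (one visible fraction digit) falls
	// under "other", which is why the precision must be passed explicitly.
	fmt.Println(l.CardinalPluralRule(1, 0))   // One
	fmt.Println(l.CardinalPluralRule(1.0, 1)) // Other ("1.0")
	fmt.Println(l.CardinalPluralRule(3, 0))   // Other
}
```

This is also why the library asks for an explicit precision instead of inferring it from the float value; the precision notes in rules.go further down explain the same point.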
diff --git a/vendor/github.com/go-playground/locales/currency/currency.go b/vendor/github.com/go-playground/locales/currency/currency.go new file mode 100644 index 000000000..cdaba596b --- /dev/null +++ b/vendor/github.com/go-playground/locales/currency/currency.go @@ -0,0 +1,308 @@ +package currency + +// Type is the currency type associated with the locales currency enum +type Type int + +// locale currencies +const ( + ADP Type = iota + AED + AFA + AFN + ALK + ALL + AMD + ANG + AOA + AOK + AON + AOR + ARA + ARL + ARM + ARP + ARS + ATS + AUD + AWG + AZM + AZN + BAD + BAM + BAN + BBD + BDT + BEC + BEF + BEL + BGL + BGM + BGN + BGO + BHD + BIF + BMD + BND + BOB + BOL + BOP + BOV + BRB + BRC + BRE + BRL + BRN + BRR + BRZ + BSD + BTN + BUK + BWP + BYB + BYN + BYR + BZD + CAD + CDF + CHE + CHF + CHW + CLE + CLF + CLP + CNH + CNX + CNY + COP + COU + CRC + CSD + CSK + CUC + CUP + CVE + CYP + CZK + DDM + DEM + DJF + DKK + DOP + DZD + ECS + ECV + EEK + EGP + ERN + ESA + ESB + ESP + ETB + EUR + FIM + FJD + FKP + FRF + GBP + GEK + GEL + GHC + GHS + GIP + GMD + GNF + GNS + GQE + GRD + GTQ + GWE + GWP + GYD + HKD + HNL + HRD + HRK + HTG + HUF + IDR + IEP + ILP + ILR + ILS + INR + IQD + IRR + ISJ + ISK + ITL + JMD + JOD + JPY + KES + KGS + KHR + KMF + KPW + KRH + KRO + KRW + KWD + KYD + KZT + LAK + LBP + LKR + LRD + LSL + LTL + LTT + LUC + LUF + LUL + LVL + LVR + LYD + MAD + MAF + MCF + MDC + MDL + MGA + MGF + MKD + MKN + MLF + MMK + MNT + MOP + MRO + MTL + MTP + MUR + MVP + MVR + MWK + MXN + MXP + MXV + MYR + MZE + MZM + MZN + NAD + NGN + NIC + NIO + NLG + NOK + NPR + NZD + OMR + PAB + PEI + PEN + PES + PGK + PHP + PKR + PLN + PLZ + PTE + PYG + QAR + RHD + ROL + RON + RSD + RUB + RUR + RWF + SAR + SBD + SCR + SDD + SDG + SDP + SEK + SGD + SHP + SIT + SKK + SLL + SOS + SRD + SRG + SSP + STD + STN + SUR + SVC + SYP + SZL + THB + TJR + TJS + TMM + TMT + TND + TOP + TPE + TRL + TRY + TTD + TWD + TZS + UAH + UAK + UGS + UGX + USD + USN + USS + UYI + UYP + UYU + UZS + VEB + VEF + VND + VNN + VUV + WST + XAF + XAG + XAU + XBA + XBB + XBC + XBD + XCD + XDR + XEU + XFO + XFU + XOF + XPD + XPF + XPT + XRE + XSU + XTS + XUA + XXX + YDD + YER + YUD + YUM + YUN + YUR + ZAL + ZAR + ZMK + ZMW + ZRN + ZRZ + ZWD + ZWL + ZWR +) diff --git a/vendor/github.com/go-playground/locales/go.mod b/vendor/github.com/go-playground/locales/go.mod new file mode 100644 index 000000000..34ab6f238 --- /dev/null +++ b/vendor/github.com/go-playground/locales/go.mod @@ -0,0 +1,5 @@ +module github.com/go-playground/locales + +go 1.13 + +require golang.org/x/text v0.3.2 diff --git a/vendor/github.com/go-playground/locales/go.sum b/vendor/github.com/go-playground/locales/go.sum new file mode 100644 index 000000000..63c9200f5 --- /dev/null +++ b/vendor/github.com/go-playground/locales/go.sum @@ -0,0 +1,3 @@ +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/github.com/go-playground/locales/logo.png b/vendor/github.com/go-playground/locales/logo.png new file mode 100644 index 000000000..3038276e6 Binary files /dev/null and b/vendor/github.com/go-playground/locales/logo.png differ diff --git a/vendor/github.com/go-playground/locales/rules.go b/vendor/github.com/go-playground/locales/rules.go new file mode 100644 index 000000000..920290014 --- /dev/null +++ b/vendor/github.com/go-playground/locales/rules.go @@ 
-0,0 +1,293 @@ +package locales + +import ( + "strconv" + "time" + + "github.com/go-playground/locales/currency" +) + +// // ErrBadNumberValue is returned when the number passed for +// // plural rule determination cannot be parsed +// type ErrBadNumberValue struct { +// NumberValue string +// InnerError error +// } + +// // Error returns ErrBadNumberValue error string +// func (e *ErrBadNumberValue) Error() string { +// return fmt.Sprintf("Invalid Number Value '%s' %s", e.NumberValue, e.InnerError) +// } + +// var _ error = new(ErrBadNumberValue) + +// PluralRule denotes the type of plural rules +type PluralRule int + +// PluralRule's +const ( + PluralRuleUnknown PluralRule = iota + PluralRuleZero // zero + PluralRuleOne // one - singular + PluralRuleTwo // two - dual + PluralRuleFew // few - paucal + PluralRuleMany // many - also used for fractions if they have a separate class + PluralRuleOther // other - required—general plural form—also used if the language only has a single form +) + +const ( + pluralsString = "UnknownZeroOneTwoFewManyOther" +) + +// Translator encapsulates an instance of a locale +// NOTE: some values are returned as a []byte just in case the caller +// wishes to add more and can help avoid allocations; otherwise just cast as string +type Translator interface { + + // The following Functions are for overriding, debugging or developing + // with a Translator Locale + + // Locale returns the string value of the translator + Locale() string + + // returns an array of cardinal plural rules associated + // with this translator + PluralsCardinal() []PluralRule + + // returns an array of ordinal plural rules associated + // with this translator + PluralsOrdinal() []PluralRule + + // returns an array of range plural rules associated + // with this translator + PluralsRange() []PluralRule + + // returns the cardinal PluralRule given 'num' and digits/precision of 'v' for locale + CardinalPluralRule(num float64, v uint64) PluralRule + + // returns the ordinal PluralRule given 'num' and digits/precision of 'v' for locale + OrdinalPluralRule(num float64, v uint64) PluralRule + + // returns the ordinal PluralRule given 'num1', 'num2' and digits/precision of 'v1' and 'v2' for locale + RangePluralRule(num1 float64, v1 uint64, num2 float64, v2 uint64) PluralRule + + // returns the locales abbreviated month given the 'month' provided + MonthAbbreviated(month time.Month) string + + // returns the locales abbreviated months + MonthsAbbreviated() []string + + // returns the locales narrow month given the 'month' provided + MonthNarrow(month time.Month) string + + // returns the locales narrow months + MonthsNarrow() []string + + // returns the locales wide month given the 'month' provided + MonthWide(month time.Month) string + + // returns the locales wide months + MonthsWide() []string + + // returns the locales abbreviated weekday given the 'weekday' provided + WeekdayAbbreviated(weekday time.Weekday) string + + // returns the locales abbreviated weekdays + WeekdaysAbbreviated() []string + + // returns the locales narrow weekday given the 'weekday' provided + WeekdayNarrow(weekday time.Weekday) string + + // WeekdaysNarrowreturns the locales narrow weekdays + WeekdaysNarrow() []string + + // returns the locales short weekday given the 'weekday' provided + WeekdayShort(weekday time.Weekday) string + + // returns the locales short weekdays + WeekdaysShort() []string + + // returns the locales wide weekday given the 'weekday' provided + WeekdayWide(weekday time.Weekday) string + + // 
returns the locales wide weekdays + WeekdaysWide() []string + + // The following Functions are common Formatting functionsfor the Translator's Locale + + // returns 'num' with digits/precision of 'v' for locale and handles both Whole and Real numbers based on 'v' + FmtNumber(num float64, v uint64) string + + // returns 'num' with digits/precision of 'v' for locale and handles both Whole and Real numbers based on 'v' + // NOTE: 'num' passed into FmtPercent is assumed to be in percent already + FmtPercent(num float64, v uint64) string + + // returns the currency representation of 'num' with digits/precision of 'v' for locale + FmtCurrency(num float64, v uint64, currency currency.Type) string + + // returns the currency representation of 'num' with digits/precision of 'v' for locale + // in accounting notation. + FmtAccounting(num float64, v uint64, currency currency.Type) string + + // returns the short date representation of 't' for locale + FmtDateShort(t time.Time) string + + // returns the medium date representation of 't' for locale + FmtDateMedium(t time.Time) string + + // returns the long date representation of 't' for locale + FmtDateLong(t time.Time) string + + // returns the full date representation of 't' for locale + FmtDateFull(t time.Time) string + + // returns the short time representation of 't' for locale + FmtTimeShort(t time.Time) string + + // returns the medium time representation of 't' for locale + FmtTimeMedium(t time.Time) string + + // returns the long time representation of 't' for locale + FmtTimeLong(t time.Time) string + + // returns the full time representation of 't' for locale + FmtTimeFull(t time.Time) string +} + +// String returns the string value of PluralRule +func (p PluralRule) String() string { + + switch p { + case PluralRuleZero: + return pluralsString[7:11] + case PluralRuleOne: + return pluralsString[11:14] + case PluralRuleTwo: + return pluralsString[14:17] + case PluralRuleFew: + return pluralsString[17:20] + case PluralRuleMany: + return pluralsString[20:24] + case PluralRuleOther: + return pluralsString[24:] + default: + return pluralsString[:7] + } +} + +// +// Precision Notes: +// +// must specify a precision >= 0, and here is why https://play.golang.org/p/LyL90U0Vyh +// +// v := float64(3.141) +// i := float64(int64(v)) +// +// fmt.Println(v - i) +// +// or +// +// s := strconv.FormatFloat(v-i, 'f', -1, 64) +// fmt.Println(s) +// +// these will not print what you'd expect: 0.14100000000000001 +// and so this library requires a precision to be specified, or +// inaccurate plural rules could be applied. +// +// +// +// n - absolute value of the source number (integer and decimals). +// i - integer digits of n. +// v - number of visible fraction digits in n, with trailing zeros. +// w - number of visible fraction digits in n, without trailing zeros. +// f - visible fractional digits in n, with trailing zeros. +// t - visible fractional digits in n, without trailing zeros. +// +// +// Func(num float64, v uint64) // v = digits/precision and prevents -1 as a special case as this can lead to very unexpected behaviour, see precision note's above. +// +// n := math.Abs(num) +// i := int64(n) +// v := v +// +// +// w := strconv.FormatFloat(num-float64(i), 'f', int(v), 64) // then parse backwards on string until no more zero's.... +// f := strconv.FormatFloat(n, 'f', int(v), 64) // then turn everything after decimal into an int64 +// t := strconv.FormatFloat(n, 'f', int(v), 64) // then parse backwards on string until no more zero's.... 
+// +// +// +// General Inclusion Rules +// - v will always be available inherently +// - all require n +// - w requires i +// + +// W returns the number of visible fraction digits in N, without trailing zeros. +func W(n float64, v uint64) (w int64) { + + s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64) + + // with either be '0' or '0.xxxx', so if 1 then w will be zero + // otherwise need to parse + if len(s) != 1 { + + s = s[2:] + end := len(s) + 1 + + for i := end; i >= 0; i-- { + if s[i] != '0' { + end = i + 1 + break + } + } + + w = int64(len(s[:end])) + } + + return +} + +// F returns the visible fractional digits in N, with trailing zeros. +func F(n float64, v uint64) (f int64) { + + s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64) + + // with either be '0' or '0.xxxx', so if 1 then f will be zero + // otherwise need to parse + if len(s) != 1 { + + // ignoring error, because it can't fail as we generated + // the string internally from a real number + f, _ = strconv.ParseInt(s[2:], 10, 64) + } + + return +} + +// T returns the visible fractional digits in N, without trailing zeros. +func T(n float64, v uint64) (t int64) { + + s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64) + + // with either be '0' or '0.xxxx', so if 1 then t will be zero + // otherwise need to parse + if len(s) != 1 { + + s = s[2:] + end := len(s) + 1 + + for i := end; i >= 0; i-- { + if s[i] != '0' { + end = i + 1 + break + } + } + + // ignoring error, because it can't fail as we generated + // the string internally from a real number + t, _ = strconv.ParseInt(s[:end], 10, 64) + } + + return +} diff --git a/vendor/github.com/go-playground/universal-translator/.gitignore b/vendor/github.com/go-playground/universal-translator/.gitignore new file mode 100644 index 000000000..bc4e07f34 --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +*.coverprofile \ No newline at end of file diff --git a/vendor/github.com/go-playground/universal-translator/.travis.yml b/vendor/github.com/go-playground/universal-translator/.travis.yml new file mode 100644 index 000000000..39b8b923e --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/.travis.yml @@ -0,0 +1,27 @@ +language: go +go: + - 1.13.4 + - tip +matrix: + allow_failures: + - go: tip + +notifications: + email: + recipients: dean.karn@gmail.com + on_success: change + on_failure: always + +before_install: + - go install github.com/mattn/goveralls + +# Only clone the most recent commit. +git: + depth: 1 + +script: + - go test -v -race -covermode=atomic -coverprofile=coverage.coverprofile ./... 
+ +after_success: | + [ $TRAVIS_GO_VERSION = 1.13.4 ] && + goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN \ No newline at end of file diff --git a/vendor/github.com/go-playground/universal-translator/LICENSE b/vendor/github.com/go-playground/universal-translator/LICENSE new file mode 100644 index 000000000..8d8aba15b --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Go Playground + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/go-playground/universal-translator/README.md b/vendor/github.com/go-playground/universal-translator/README.md new file mode 100644 index 000000000..071f33ab2 --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/README.md @@ -0,0 +1,89 @@ +## universal-translator +![Project status](https://img.shields.io/badge/version-0.17.0-green.svg) +[![Build Status](https://travis-ci.org/go-playground/universal-translator.svg?branch=master)](https://travis-ci.org/go-playground/universal-translator) +[![Coverage Status](https://coveralls.io/repos/github/go-playground/universal-translator/badge.svg)](https://coveralls.io/github/go-playground/universal-translator) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/universal-translator)](https://goreportcard.com/report/github.com/go-playground/universal-translator) +[![GoDoc](https://godoc.org/github.com/go-playground/universal-translator?status.svg)](https://godoc.org/github.com/go-playground/universal-translator) +![License](https://img.shields.io/dub/l/vibe-d.svg) +[![Gitter](https://badges.gitter.im/go-playground/universal-translator.svg)](https://gitter.im/go-playground/universal-translator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) + +Universal Translator is an i18n Translator for Go/Golang using CLDR data + pluralization rules + +Why another i18n library? +-------------------------- +Because none of the plural rules seem to be correct out there, including the previous implementation of this package, +so I took it upon myself to create [locales](https://github.com/go-playground/locales) for everyone to use; this package +is a thin wrapper around [locales](https://github.com/go-playground/locales) in order to store and translate text for +use in your applications. 
+ +Features +-------- +- [x] Rules generated from the [CLDR](http://cldr.unicode.org/index/downloads) data, v30.0.3 +- [x] Contains Cardinal, Ordinal and Range Plural Rules +- [x] Contains Month, Weekday and Timezone translations built in +- [x] Contains Date & Time formatting functions +- [x] Contains Number, Currency, Accounting and Percent formatting functions +- [x] Supports the "Gregorian" calendar only ( my time isn't unlimited, had to draw the line somewhere ) +- [x] Support loading translations from files +- [x] Exporting translations to file(s), mainly for getting them professionally translated +- [ ] Code Generation for translation files -> Go code.. i.e. after it has been professionally translated +- [ ] Tests for all languages, I need help with this, please see [here](https://github.com/go-playground/locales/issues/1) + +Installation +----------- + +Use go get + +```shell +go get github.com/go-playground/universal-translator +``` + +Usage & Documentation +------- + +Please see https://godoc.org/github.com/go-playground/universal-translator for usage docs + +##### Examples: + +- [Basic](https://github.com/go-playground/universal-translator/tree/master/_examples/basic) +- [Full - no files](https://github.com/go-playground/universal-translator/tree/master/_examples/full-no-files) +- [Full - with files](https://github.com/go-playground/universal-translator/tree/master/_examples/full-with-files) + +File formatting +-------------- +All types, Plain substitution, Cardinal, Ordinal and Range translations can all be contained withing the same file(s); +they are only separated for easy viewing. + +##### Examples: + +- [Formats](https://github.com/go-playground/universal-translator/tree/master/_examples/file-formats) + +##### Basic Makeup +NOTE: not all fields are needed for all translation types, see [examples](https://github.com/go-playground/universal-translator/tree/master/_examples/file-formats) +```json +{ + "locale": "en", + "key": "days-left", + "trans": "You have {0} day left.", + "type": "Cardinal", + "rule": "One", + "override": false +} +``` +|Field|Description| +|---|---| +|locale|The locale for which the translation is for.| +|key|The translation key that will be used to store and lookup each translation; normally it is a string or integer.| +|trans|The actual translation text.| +|type|The type of translation Cardinal, Ordinal, Range or "" for a plain substitution(not required to be defined if plain used)| +|rule|The plural rule for which the translation is for eg. One, Two, Few, Many or Other.(not required to be defined if plain used)| +|override|If you wish to override an existing translation that has already been registered, set this to 'true'. 99% of the time there is no need to define it.| + +Help With Tests +--------------- +To anyone interesting in helping or contributing, I sure could use some help creating tests for each language. +Please see issue [here](https://github.com/go-playground/locales/issues/1) for details. + +License +------ +Distributed under MIT License, please see license file in code for more details. 
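To make the file format table above concrete, here is a minimal sketch of feeding translations in that JSON layout through the importer and reading one back. It assumes the `en` locale package from go-playground/locales and skips most error handling; treat it as an illustration of the API shown in import_export.go and translator.go below, not canonical usage.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/go-playground/locales/en"
	ut "github.com/go-playground/universal-translator"
)

func main() {
	enLocale := en.New()
	uni := ut.New(enLocale, enLocale) // fallback locale + supported locales

	// Same layout as the "Basic Makeup" JSON above; normally these would be
	// loaded from files via uni.Import(ut.FormatJSON, "path/to/translations").
	const data = `[
	  {"locale": "en", "key": "days-left", "trans": "You have {0} day left.",  "type": "Cardinal", "rule": "One"},
	  {"locale": "en", "key": "days-left", "trans": "You have {0} days left.", "type": "Cardinal", "rule": "Other"}
	]`

	if err := uni.ImportByReader(ut.FormatJSON, strings.NewReader(data)); err != nil {
		panic(err)
	}

	trans, _ := uni.GetTranslator("en")
	msg, _ := trans.C("days-left", 1, 0, "1") // 1 with 0 digits selects the "One" rule
	fmt.Println(msg)                          // You have 1 day left.
}
```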
diff --git a/vendor/github.com/go-playground/universal-translator/errors.go b/vendor/github.com/go-playground/universal-translator/errors.go new file mode 100644 index 000000000..38b163b62 --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/errors.go @@ -0,0 +1,148 @@ +package ut + +import ( + "errors" + "fmt" + + "github.com/go-playground/locales" +) + +var ( + // ErrUnknowTranslation indicates the translation could not be found + ErrUnknowTranslation = errors.New("Unknown Translation") +) + +var _ error = new(ErrConflictingTranslation) +var _ error = new(ErrRangeTranslation) +var _ error = new(ErrOrdinalTranslation) +var _ error = new(ErrCardinalTranslation) +var _ error = new(ErrMissingPluralTranslation) +var _ error = new(ErrExistingTranslator) + +// ErrExistingTranslator is the error representing a conflicting translator +type ErrExistingTranslator struct { + locale string +} + +// Error returns ErrExistingTranslator's internal error text +func (e *ErrExistingTranslator) Error() string { + return fmt.Sprintf("error: conflicting translator for locale '%s'", e.locale) +} + +// ErrConflictingTranslation is the error representing a conflicting translation +type ErrConflictingTranslation struct { + locale string + key interface{} + rule locales.PluralRule + text string +} + +// Error returns ErrConflictingTranslation's internal error text +func (e *ErrConflictingTranslation) Error() string { + + if _, ok := e.key.(string); !ok { + return fmt.Sprintf("error: conflicting key '%#v' rule '%s' with text '%s' for locale '%s', value being ignored", e.key, e.rule, e.text, e.locale) + } + + return fmt.Sprintf("error: conflicting key '%s' rule '%s' with text '%s' for locale '%s', value being ignored", e.key, e.rule, e.text, e.locale) +} + +// ErrRangeTranslation is the error representing a range translation error +type ErrRangeTranslation struct { + text string +} + +// Error returns ErrRangeTranslation's internal error text +func (e *ErrRangeTranslation) Error() string { + return e.text +} + +// ErrOrdinalTranslation is the error representing an ordinal translation error +type ErrOrdinalTranslation struct { + text string +} + +// Error returns ErrOrdinalTranslation's internal error text +func (e *ErrOrdinalTranslation) Error() string { + return e.text +} + +// ErrCardinalTranslation is the error representing a cardinal translation error +type ErrCardinalTranslation struct { + text string +} + +// Error returns ErrCardinalTranslation's internal error text +func (e *ErrCardinalTranslation) Error() string { + return e.text +} + +// ErrMissingPluralTranslation is the error signifying a missing translation given +// the locales plural rules. +type ErrMissingPluralTranslation struct { + locale string + key interface{} + rule locales.PluralRule + translationType string +} + +// Error returns ErrMissingPluralTranslation's internal error text +func (e *ErrMissingPluralTranslation) Error() string { + + if _, ok := e.key.(string); !ok { + return fmt.Sprintf("error: missing '%s' plural rule '%s' for translation with key '%#v' and locale '%s'", e.translationType, e.rule, e.key, e.locale) + } + + return fmt.Sprintf("error: missing '%s' plural rule '%s' for translation with key '%s' and locale '%s'", e.translationType, e.rule, e.key, e.locale) +} + +// ErrMissingBracket is the error representing a missing bracket in a translation +// eg. 
This is a {0 <-- missing ending '}' +type ErrMissingBracket struct { + locale string + key interface{} + text string +} + +// Error returns ErrMissingBracket error message +func (e *ErrMissingBracket) Error() string { + return fmt.Sprintf("error: missing bracket '{}', in translation. locale: '%s' key: '%v' text: '%s'", e.locale, e.key, e.text) +} + +// ErrBadParamSyntax is the error representing a bad parameter definition in a translation +// eg. This is a {must-be-int} +type ErrBadParamSyntax struct { + locale string + param string + key interface{} + text string +} + +// Error returns ErrBadParamSyntax error message +func (e *ErrBadParamSyntax) Error() string { + return fmt.Sprintf("error: bad parameter syntax, missing parameter '%s' in translation. locale: '%s' key: '%v' text: '%s'", e.param, e.locale, e.key, e.text) +} + +// import/export errors + +// ErrMissingLocale is the error representing an expected locale that could +// not be found aka locale not registered with the UniversalTranslator Instance +type ErrMissingLocale struct { + locale string +} + +// Error returns ErrMissingLocale's internal error text +func (e *ErrMissingLocale) Error() string { + return fmt.Sprintf("error: locale '%s' not registered.", e.locale) +} + +// ErrBadPluralDefinition is the error representing an incorrect plural definition +// usually found within translations defined within files during the import process. +type ErrBadPluralDefinition struct { + tl translation +} + +// Error returns ErrBadPluralDefinition's internal error text +func (e *ErrBadPluralDefinition) Error() string { + return fmt.Sprintf("error: bad plural definition '%#v'", e.tl) +} diff --git a/vendor/github.com/go-playground/universal-translator/go.mod b/vendor/github.com/go-playground/universal-translator/go.mod new file mode 100644 index 000000000..8079590fe --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/go.mod @@ -0,0 +1,5 @@ +module github.com/go-playground/universal-translator + +go 1.13 + +require github.com/go-playground/locales v0.13.0 diff --git a/vendor/github.com/go-playground/universal-translator/go.sum b/vendor/github.com/go-playground/universal-translator/go.sum new file mode 100644 index 000000000..cbbf32416 --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/go.sum @@ -0,0 +1,4 @@ +github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/github.com/go-playground/universal-translator/import_export.go b/vendor/github.com/go-playground/universal-translator/import_export.go new file mode 100644 index 000000000..7bd76f26b --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/import_export.go @@ -0,0 +1,274 @@ +package ut + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "io" + + "github.com/go-playground/locales" +) + +type translation struct { + Locale string `json:"locale"` + Key interface{} `json:"key"` // either string or integer + Translation string `json:"trans"` + PluralType string `json:"type,omitempty"` + PluralRule string `json:"rule,omitempty"` + OverrideExisting bool `json:"override,omitempty"` +} + +const ( + cardinalType = "Cardinal" + ordinalType = "Ordinal" + rangeType = "Range" 
+) + +// ImportExportFormat is the format of the file import or export +type ImportExportFormat uint8 + +// supported Export Formats +const ( + FormatJSON ImportExportFormat = iota +) + +// Export writes the translations out to a file on disk. +// +// NOTE: this currently only works with string or int translations keys. +func (t *UniversalTranslator) Export(format ImportExportFormat, dirname string) error { + + _, err := os.Stat(dirname) + fmt.Println(dirname, err, os.IsNotExist(err)) + if err != nil { + + if !os.IsNotExist(err) { + return err + } + + if err = os.MkdirAll(dirname, 0744); err != nil { + return err + } + } + + // build up translations + var trans []translation + var b []byte + var ext string + + for _, locale := range t.translators { + + for k, v := range locale.(*translator).translations { + trans = append(trans, translation{ + Locale: locale.Locale(), + Key: k, + Translation: v.text, + }) + } + + for k, pluralTrans := range locale.(*translator).cardinalTanslations { + + for i, plural := range pluralTrans { + + // leave enough for all plural rules + // but not all are set for all languages. + if plural == nil { + continue + } + + trans = append(trans, translation{ + Locale: locale.Locale(), + Key: k.(string), + Translation: plural.text, + PluralType: cardinalType, + PluralRule: locales.PluralRule(i).String(), + }) + } + } + + for k, pluralTrans := range locale.(*translator).ordinalTanslations { + + for i, plural := range pluralTrans { + + // leave enough for all plural rules + // but not all are set for all languages. + if plural == nil { + continue + } + + trans = append(trans, translation{ + Locale: locale.Locale(), + Key: k.(string), + Translation: plural.text, + PluralType: ordinalType, + PluralRule: locales.PluralRule(i).String(), + }) + } + } + + for k, pluralTrans := range locale.(*translator).rangeTanslations { + + for i, plural := range pluralTrans { + + // leave enough for all plural rules + // but not all are set for all languages. + if plural == nil { + continue + } + + trans = append(trans, translation{ + Locale: locale.Locale(), + Key: k.(string), + Translation: plural.text, + PluralType: rangeType, + PluralRule: locales.PluralRule(i).String(), + }) + } + } + + switch format { + case FormatJSON: + b, err = json.MarshalIndent(trans, "", " ") + ext = ".json" + } + + if err != nil { + return err + } + + err = ioutil.WriteFile(filepath.Join(dirname, fmt.Sprintf("%s%s", locale.Locale(), ext)), b, 0644) + if err != nil { + return err + } + + trans = trans[0:0] + } + + return nil +} + +// Import reads the translations out of a file or directory on disk. +// +// NOTE: this currently only works with string or int translations keys. 
+func (t *UniversalTranslator) Import(format ImportExportFormat, dirnameOrFilename string) error { + + fi, err := os.Stat(dirnameOrFilename) + if err != nil { + return err + } + + processFn := func(filename string) error { + + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + + return t.ImportByReader(format, f) + } + + if !fi.IsDir() { + return processFn(dirnameOrFilename) + } + + // recursively go through directory + walker := func(path string, info os.FileInfo, err error) error { + + if info.IsDir() { + return nil + } + + switch format { + case FormatJSON: + // skip non JSON files + if filepath.Ext(info.Name()) != ".json" { + return nil + } + } + + return processFn(path) + } + + return filepath.Walk(dirnameOrFilename, walker) +} + +// ImportByReader imports the the translations found within the contents read from the supplied reader. +// +// NOTE: generally used when assets have been embedded into the binary and are already in memory. +func (t *UniversalTranslator) ImportByReader(format ImportExportFormat, reader io.Reader) error { + + b, err := ioutil.ReadAll(reader) + if err != nil { + return err + } + + var trans []translation + + switch format { + case FormatJSON: + err = json.Unmarshal(b, &trans) + } + + if err != nil { + return err + } + + for _, tl := range trans { + + locale, found := t.FindTranslator(tl.Locale) + if !found { + return &ErrMissingLocale{locale: tl.Locale} + } + + pr := stringToPR(tl.PluralRule) + + if pr == locales.PluralRuleUnknown { + + err = locale.Add(tl.Key, tl.Translation, tl.OverrideExisting) + if err != nil { + return err + } + + continue + } + + switch tl.PluralType { + case cardinalType: + err = locale.AddCardinal(tl.Key, tl.Translation, pr, tl.OverrideExisting) + case ordinalType: + err = locale.AddOrdinal(tl.Key, tl.Translation, pr, tl.OverrideExisting) + case rangeType: + err = locale.AddRange(tl.Key, tl.Translation, pr, tl.OverrideExisting) + default: + return &ErrBadPluralDefinition{tl: tl} + } + + if err != nil { + return err + } + } + + return nil +} + +func stringToPR(s string) locales.PluralRule { + + switch s { + case "One": + return locales.PluralRuleOne + case "Two": + return locales.PluralRuleTwo + case "Few": + return locales.PluralRuleFew + case "Many": + return locales.PluralRuleMany + case "Other": + return locales.PluralRuleOther + default: + return locales.PluralRuleUnknown + } + +} diff --git a/vendor/github.com/go-playground/universal-translator/logo.png b/vendor/github.com/go-playground/universal-translator/logo.png new file mode 100644 index 000000000..a37aa8c0c Binary files /dev/null and b/vendor/github.com/go-playground/universal-translator/logo.png differ diff --git a/vendor/github.com/go-playground/universal-translator/translator.go b/vendor/github.com/go-playground/universal-translator/translator.go new file mode 100644 index 000000000..cfafce8a0 --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/translator.go @@ -0,0 +1,420 @@ +package ut + +import ( + "fmt" + "strconv" + "strings" + + "github.com/go-playground/locales" +) + +const ( + paramZero = "{0}" + paramOne = "{1}" + unknownTranslation = "" +) + +// Translator is universal translators +// translator instance which is a thin wrapper +// around locales.Translator instance providing +// some extra functionality +type Translator interface { + locales.Translator + + // adds a normal translation for a particular language/locale + // {#} is the only replacement type accepted and are ad infinitum + // eg. 
one: '{0} day left' other: '{0} days left' + Add(key interface{}, text string, override bool) error + + // adds a cardinal plural translation for a particular language/locale + // {0} is the only replacement type accepted and only one variable is accepted as + // multiple cannot be used for a plural rule determination, unless it is a range; + // see AddRange below. + // eg. in locale 'en' one: '{0} day left' other: '{0} days left' + AddCardinal(key interface{}, text string, rule locales.PluralRule, override bool) error + + // adds an ordinal plural translation for a particular language/locale + // {0} is the only replacement type accepted and only one variable is accepted as + // multiple cannot be used for a plural rule determination, unless it is a range; + // see AddRange below. + // eg. in locale 'en' one: '{0}st day of spring' other: '{0}nd day of spring' + // - 1st, 2nd, 3rd... + AddOrdinal(key interface{}, text string, rule locales.PluralRule, override bool) error + + // adds a range plural translation for a particular language/locale + // {0} and {1} are the only replacement types accepted and only these are accepted. + // eg. in locale 'nl' one: '{0}-{1} day left' other: '{0}-{1} days left' + AddRange(key interface{}, text string, rule locales.PluralRule, override bool) error + + // creates the translation for the locale given the 'key' and params passed in + T(key interface{}, params ...string) (string, error) + + // creates the cardinal translation for the locale given the 'key', 'num' and 'digit' arguments + // and param passed in + C(key interface{}, num float64, digits uint64, param string) (string, error) + + // creates the ordinal translation for the locale given the 'key', 'num' and 'digit' arguments + // and param passed in + O(key interface{}, num float64, digits uint64, param string) (string, error) + + // creates the range translation for the locale given the 'key', 'num1', 'digit1', 'num2' and + // 'digit2' arguments and 'param1' and 'param2' passed in + R(key interface{}, num1 float64, digits1 uint64, num2 float64, digits2 uint64, param1, param2 string) (string, error) + + // VerifyTranslations checks to ensures that no plural rules have been + // missed within the translations. + VerifyTranslations() error +} + +var _ Translator = new(translator) +var _ locales.Translator = new(translator) + +type translator struct { + locales.Translator + translations map[interface{}]*transText + cardinalTanslations map[interface{}][]*transText // array index is mapped to locales.PluralRule index + the locales.PluralRuleUnknown + ordinalTanslations map[interface{}][]*transText + rangeTanslations map[interface{}][]*transText +} + +type transText struct { + text string + indexes []int +} + +func newTranslator(trans locales.Translator) Translator { + return &translator{ + Translator: trans, + translations: make(map[interface{}]*transText), // translation text broken up by byte index + cardinalTanslations: make(map[interface{}][]*transText), + ordinalTanslations: make(map[interface{}][]*transText), + rangeTanslations: make(map[interface{}][]*transText), + } +} + +// Add adds a normal translation for a particular language/locale +// {#} is the only replacement type accepted and are ad infinitum +// eg. 
one: '{0} day left' other: '{0} days left' +func (t *translator) Add(key interface{}, text string, override bool) error { + + if _, ok := t.translations[key]; ok && !override { + return &ErrConflictingTranslation{locale: t.Locale(), key: key, text: text} + } + + lb := strings.Count(text, "{") + rb := strings.Count(text, "}") + + if lb != rb { + return &ErrMissingBracket{locale: t.Locale(), key: key, text: text} + } + + trans := &transText{ + text: text, + } + + var idx int + + for i := 0; i < lb; i++ { + s := "{" + strconv.Itoa(i) + "}" + idx = strings.Index(text, s) + if idx == -1 { + return &ErrBadParamSyntax{locale: t.Locale(), param: s, key: key, text: text} + } + + trans.indexes = append(trans.indexes, idx) + trans.indexes = append(trans.indexes, idx+len(s)) + } + + t.translations[key] = trans + + return nil +} + +// AddCardinal adds a cardinal plural translation for a particular language/locale +// {0} is the only replacement type accepted and only one variable is accepted as +// multiple cannot be used for a plural rule determination, unless it is a range; +// see AddRange below. +// eg. in locale 'en' one: '{0} day left' other: '{0} days left' +func (t *translator) AddCardinal(key interface{}, text string, rule locales.PluralRule, override bool) error { + + var verified bool + + // verify plural rule exists for locale + for _, pr := range t.PluralsCardinal() { + if pr == rule { + verified = true + break + } + } + + if !verified { + return &ErrCardinalTranslation{text: fmt.Sprintf("error: cardinal plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)} + } + + tarr, ok := t.cardinalTanslations[key] + if ok { + // verify not adding a conflicting record + if len(tarr) > 0 && tarr[rule] != nil && !override { + return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text} + } + + } else { + tarr = make([]*transText, 7, 7) + t.cardinalTanslations[key] = tarr + } + + trans := &transText{ + text: text, + indexes: make([]int, 2, 2), + } + + tarr[rule] = trans + + idx := strings.Index(text, paramZero) + if idx == -1 { + tarr[rule] = nil + return &ErrCardinalTranslation{text: fmt.Sprintf("error: parameter '%s' not found, may want to use 'Add' instead of 'AddCardinal'. locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)} + } + + trans.indexes[0] = idx + trans.indexes[1] = idx + len(paramZero) + + return nil +} + +// AddOrdinal adds an ordinal plural translation for a particular language/locale +// {0} is the only replacement type accepted and only one variable is accepted as +// multiple cannot be used for a plural rule determination, unless it is a range; +// see AddRange below. +// eg. in locale 'en' one: '{0}st day of spring' other: '{0}nd day of spring' - 1st, 2nd, 3rd... 
+func (t *translator) AddOrdinal(key interface{}, text string, rule locales.PluralRule, override bool) error { + + var verified bool + + // verify plural rule exists for locale + for _, pr := range t.PluralsOrdinal() { + if pr == rule { + verified = true + break + } + } + + if !verified { + return &ErrOrdinalTranslation{text: fmt.Sprintf("error: ordinal plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)} + } + + tarr, ok := t.ordinalTanslations[key] + if ok { + // verify not adding a conflicting record + if len(tarr) > 0 && tarr[rule] != nil && !override { + return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text} + } + + } else { + tarr = make([]*transText, 7, 7) + t.ordinalTanslations[key] = tarr + } + + trans := &transText{ + text: text, + indexes: make([]int, 2, 2), + } + + tarr[rule] = trans + + idx := strings.Index(text, paramZero) + if idx == -1 { + tarr[rule] = nil + return &ErrOrdinalTranslation{text: fmt.Sprintf("error: parameter '%s' not found, may want to use 'Add' instead of 'AddOrdinal'. locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)} + } + + trans.indexes[0] = idx + trans.indexes[1] = idx + len(paramZero) + + return nil +} + +// AddRange adds a range plural translation for a particular language/locale +// {0} and {1} are the only replacement types accepted and only these are accepted. +// eg. in locale 'nl' one: '{0}-{1} day left' other: '{0}-{1} days left' +func (t *translator) AddRange(key interface{}, text string, rule locales.PluralRule, override bool) error { + + var verified bool + + // verify plural rule exists for locale + for _, pr := range t.PluralsRange() { + if pr == rule { + verified = true + break + } + } + + if !verified { + return &ErrRangeTranslation{text: fmt.Sprintf("error: range plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)} + } + + tarr, ok := t.rangeTanslations[key] + if ok { + // verify not adding a conflicting record + if len(tarr) > 0 && tarr[rule] != nil && !override { + return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text} + } + + } else { + tarr = make([]*transText, 7, 7) + t.rangeTanslations[key] = tarr + } + + trans := &transText{ + text: text, + indexes: make([]int, 4, 4), + } + + tarr[rule] = trans + + idx := strings.Index(text, paramZero) + if idx == -1 { + tarr[rule] = nil + return &ErrRangeTranslation{text: fmt.Sprintf("error: parameter '%s' not found, are you sure you're adding a Range Translation? locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)} + } + + trans.indexes[0] = idx + trans.indexes[1] = idx + len(paramZero) + + idx = strings.Index(text, paramOne) + if idx == -1 { + tarr[rule] = nil + return &ErrRangeTranslation{text: fmt.Sprintf("error: parameter '%s' not found, a Range Translation requires two parameters. locale: '%s' key: '%v' text: '%s'", paramOne, t.Locale(), key, text)} + } + + trans.indexes[2] = idx + trans.indexes[3] = idx + len(paramOne) + + return nil +} + +// T creates the translation for the locale given the 'key' and params passed in +func (t *translator) T(key interface{}, params ...string) (string, error) { + + trans, ok := t.translations[key] + if !ok { + return unknownTranslation, ErrUnknowTranslation + } + + b := make([]byte, 0, 64) + + var start, end, count int + + for i := 0; i < len(trans.indexes); i++ { + end = trans.indexes[i] + b = append(b, trans.text[start:end]...) 
+ b = append(b, params[count]...) + i++ + start = trans.indexes[i] + count++ + } + + b = append(b, trans.text[start:]...) + + return string(b), nil +} + +// C creates the cardinal translation for the locale given the 'key', 'num' and 'digit' arguments and param passed in +func (t *translator) C(key interface{}, num float64, digits uint64, param string) (string, error) { + + tarr, ok := t.cardinalTanslations[key] + if !ok { + return unknownTranslation, ErrUnknowTranslation + } + + rule := t.CardinalPluralRule(num, digits) + + trans := tarr[rule] + + b := make([]byte, 0, 64) + b = append(b, trans.text[:trans.indexes[0]]...) + b = append(b, param...) + b = append(b, trans.text[trans.indexes[1]:]...) + + return string(b), nil +} + +// O creates the ordinal translation for the locale given the 'key', 'num' and 'digit' arguments and param passed in +func (t *translator) O(key interface{}, num float64, digits uint64, param string) (string, error) { + + tarr, ok := t.ordinalTanslations[key] + if !ok { + return unknownTranslation, ErrUnknowTranslation + } + + rule := t.OrdinalPluralRule(num, digits) + + trans := tarr[rule] + + b := make([]byte, 0, 64) + b = append(b, trans.text[:trans.indexes[0]]...) + b = append(b, param...) + b = append(b, trans.text[trans.indexes[1]:]...) + + return string(b), nil +} + +// R creates the range translation for the locale given the 'key', 'num1', 'digit1', 'num2' and 'digit2' arguments +// and 'param1' and 'param2' passed in +func (t *translator) R(key interface{}, num1 float64, digits1 uint64, num2 float64, digits2 uint64, param1, param2 string) (string, error) { + + tarr, ok := t.rangeTanslations[key] + if !ok { + return unknownTranslation, ErrUnknowTranslation + } + + rule := t.RangePluralRule(num1, digits1, num2, digits2) + + trans := tarr[rule] + + b := make([]byte, 0, 64) + b = append(b, trans.text[:trans.indexes[0]]...) + b = append(b, param1...) + b = append(b, trans.text[trans.indexes[1]:trans.indexes[2]]...) + b = append(b, param2...) + b = append(b, trans.text[trans.indexes[3]:]...) + + return string(b), nil +} + +// VerifyTranslations checks to ensures that no plural rules have been +// missed within the translations. 
+func (t *translator) VerifyTranslations() error { + + for k, v := range t.cardinalTanslations { + + for _, rule := range t.PluralsCardinal() { + + if v[rule] == nil { + return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "plural", rule: rule, key: k} + } + } + } + + for k, v := range t.ordinalTanslations { + + for _, rule := range t.PluralsOrdinal() { + + if v[rule] == nil { + return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "ordinal", rule: rule, key: k} + } + } + } + + for k, v := range t.rangeTanslations { + + for _, rule := range t.PluralsRange() { + + if v[rule] == nil { + return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "range", rule: rule, key: k} + } + } + } + + return nil +} diff --git a/vendor/github.com/go-playground/universal-translator/universal_translator.go b/vendor/github.com/go-playground/universal-translator/universal_translator.go new file mode 100644 index 000000000..dbf707f5c --- /dev/null +++ b/vendor/github.com/go-playground/universal-translator/universal_translator.go @@ -0,0 +1,113 @@ +package ut + +import ( + "strings" + + "github.com/go-playground/locales" +) + +// UniversalTranslator holds all locale & translation data +type UniversalTranslator struct { + translators map[string]Translator + fallback Translator +} + +// New returns a new UniversalTranslator instance set with +// the fallback locale and locales it should support +func New(fallback locales.Translator, supportedLocales ...locales.Translator) *UniversalTranslator { + + t := &UniversalTranslator{ + translators: make(map[string]Translator), + } + + for _, v := range supportedLocales { + + trans := newTranslator(v) + t.translators[strings.ToLower(trans.Locale())] = trans + + if fallback.Locale() == v.Locale() { + t.fallback = trans + } + } + + if t.fallback == nil && fallback != nil { + t.fallback = newTranslator(fallback) + } + + return t +} + +// FindTranslator trys to find a Translator based on an array of locales +// and returns the first one it can find, otherwise returns the +// fallback translator. 
+func (t *UniversalTranslator) FindTranslator(locales ...string) (trans Translator, found bool) { + + for _, locale := range locales { + + if trans, found = t.translators[strings.ToLower(locale)]; found { + return + } + } + + return t.fallback, false +} + +// GetTranslator returns the specified translator for the given locale, +// or fallback if not found +func (t *UniversalTranslator) GetTranslator(locale string) (trans Translator, found bool) { + + if trans, found = t.translators[strings.ToLower(locale)]; found { + return + } + + return t.fallback, false +} + +// GetFallback returns the fallback locale +func (t *UniversalTranslator) GetFallback() Translator { + return t.fallback +} + +// AddTranslator adds the supplied translator, if it already exists the override param +// will be checked and if false an error will be returned, otherwise the translator will be +// overridden; if the fallback matches the supplied translator it will be overridden as well +// NOTE: this is normally only used when translator is embedded within a library +func (t *UniversalTranslator) AddTranslator(translator locales.Translator, override bool) error { + + lc := strings.ToLower(translator.Locale()) + _, ok := t.translators[lc] + if ok && !override { + return &ErrExistingTranslator{locale: translator.Locale()} + } + + trans := newTranslator(translator) + + if t.fallback.Locale() == translator.Locale() { + + // because it's optional to have a fallback, I don't impose that limitation + // don't know why you wouldn't but... + if !override { + return &ErrExistingTranslator{locale: translator.Locale()} + } + + t.fallback = trans + } + + t.translators[lc] = trans + + return nil +} + +// VerifyTranslations runs through all locales and identifies any issues +// eg. missing plural rules for a locale +func (t *UniversalTranslator) VerifyTranslations() (err error) { + + for _, trans := range t.translators { + err = trans.VerifyTranslations() + if err != nil { + return + } + } + + return +} diff --git a/vendor/gopkg.in/go-playground/validator.v8/.gitignore b/vendor/github.com/go-playground/validator/v10/.gitignore similarity index 94% rename from vendor/gopkg.in/go-playground/validator.v8/.gitignore rename to vendor/github.com/go-playground/validator/v10/.gitignore index 792ca00d2..6e43fac0d 100644 --- a/vendor/gopkg.in/go-playground/validator.v8/.gitignore +++ b/vendor/github.com/go-playground/validator/v10/.gitignore @@ -6,6 +6,7 @@ # Folders _obj _test +bin # Architecture specific extensions/prefixes *.[568vq] @@ -26,4 +27,4 @@ _testmain.go *.out *.txt cover.html -README.html \ No newline at end of file +README.html diff --git a/vendor/github.com/go-playground/validator/v10/.travis.yml b/vendor/github.com/go-playground/validator/v10/.travis.yml new file mode 100644 index 000000000..85a7be34b --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/.travis.yml @@ -0,0 +1,29 @@ +language: go +go: + - 1.15.2 + - tip +matrix: + allow_failures: + - go: tip + +notifications: + email: + recipients: dean.karn@gmail.com + on_success: change + on_failure: always + +before_install: + - go install github.com/mattn/goveralls + - mkdir -p $GOPATH/src/gopkg.in + - ln -s $GOPATH/src/github.com/$TRAVIS_REPO_SLUG $GOPATH/src/gopkg.in/validator.v9 + +# Only clone the most recent commit. +git: + depth: 1 + +script: + - go test -v -race -covermode=atomic -coverprofile=coverage.coverprofile ./... 
+ +after_success: | + [ $TRAVIS_GO_VERSION = 1.15.2 ] && + goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/gopkg.in/go-playground/validator.v8/LICENSE b/vendor/github.com/go-playground/validator/v10/LICENSE similarity index 100% rename from vendor/gopkg.in/go-playground/validator.v8/LICENSE rename to vendor/github.com/go-playground/validator/v10/LICENSE diff --git a/vendor/github.com/go-playground/validator/v10/Makefile b/vendor/github.com/go-playground/validator/v10/Makefile new file mode 100644 index 000000000..19c91ed73 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/Makefile @@ -0,0 +1,18 @@ +GOCMD=GO111MODULE=on go + +linters-install: + @golangci-lint --version >/dev/null 2>&1 || { \ + echo "installing linting tools..."; \ + curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s v1.21.0; \ + } + +lint: linters-install + $(PWD)/bin/golangci-lint run + +test: + $(GOCMD) test -cover -race ./... + +bench: + $(GOCMD) test -bench=. -benchmem ./... + +.PHONY: test lint linters-install \ No newline at end of file diff --git a/vendor/github.com/go-playground/validator/v10/README.md b/vendor/github.com/go-playground/validator/v10/README.md new file mode 100644 index 000000000..04fbb3c86 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/README.md @@ -0,0 +1,299 @@ +Package validator +================ +[![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +![Project status](https://img.shields.io/badge/version-10.4.1-green.svg) +[![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator) +[![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator) +[![GoDoc](https://godoc.org/github.com/go-playground/validator?status.svg)](https://pkg.go.dev/github.com/go-playground/validator/v10) +![License](https://img.shields.io/dub/l/vibe-d.svg) + +Package validator implements value validations for structs and individual fields based on tags. + +It has the following **unique** features: + +- Cross Field and Cross Struct validations by using validation tags or custom validators. +- Slice, Array and Map diving, which allows any or all levels of a multidimensional field to be validated. +- Ability to dive into both map keys and values for validation +- Handles type interface by determining it's underlying type prior to validation. +- Handles custom field types such as sql driver Valuer see [Valuer](https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29) +- Alias validation tags, which allows for mapping of several validations to a single tag for easier defining of validations on structs +- Extraction of custom defined Field Name e.g. can specify to extract the JSON name while validating and have it available in the resulting FieldError +- Customizable i18n aware error messages. 
+- Default validator for the [gin](https://github.com/gin-gonic/gin) web framework; upgrading from v8 to v9 in gin see [here](https://github.com/go-playground/validator/tree/master/_examples/gin-upgrading-overriding) + +Installation +------------ + +Use go get. + + go get github.com/go-playground/validator/v10 + +Then import the validator package into your own code. + + import "github.com/go-playground/validator/v10" + +Error Return Value +------- + +Validation functions return type error + +They return type error to avoid the issue discussed in the following, where err is always != nil: + +* http://stackoverflow.com/a/29138676/3158232 +* https://github.com/go-playground/validator/issues/134 + +Validator only InvalidValidationError for bad validation input, nil or ValidationErrors as type error; so, in your code all you need to do is check if the error returned is not nil, and if it's not check if error is InvalidValidationError ( if necessary, most of the time it isn't ) type cast it to type ValidationErrors like so: + +```go +err := validate.Struct(mystruct) +validationErrors := err.(validator.ValidationErrors) + ``` + +Usage and documentation +------ + +Please see https://godoc.org/github.com/go-playground/validator for detailed usage docs. + +##### Examples: + +- [Simple](https://github.com/go-playground/validator/blob/master/_examples/simple/main.go) +- [Custom Field Types](https://github.com/go-playground/validator/blob/master/_examples/custom/main.go) +- [Struct Level](https://github.com/go-playground/validator/blob/master/_examples/struct-level/main.go) +- [Translations & Custom Errors](https://github.com/go-playground/validator/blob/master/_examples/translations/main.go) +- [Gin upgrade and/or override validator](https://github.com/go-playground/validator/tree/v9/_examples/gin-upgrading-overriding) +- [wash - an example application putting it all together](https://github.com/bluesuncorp/wash) + +Baked-in Validations +------ + +### Fields: + +| Tag | Description | +| - | - | +| eqcsfield | Field Equals Another Field (relative)| +| eqfield | Field Equals Another Field | +| fieldcontains | NOT DOCUMENTED IN doc.go | +| fieldexcludes | NOT DOCUMENTED IN doc.go | +| gtcsfield | Field Greater Than Another Relative Field | +| gtecsfield | Field Greater Than or Equal To Another Relative Field | +| gtefield | Field Greater Than or Equal To Another Field | +| gtfield | Field Greater Than Another Field | +| ltcsfield | Less Than Another Relative Field | +| ltecsfield | Less Than or Equal To Another Relative Field | +| ltefield | Less Than or Equal To Another Field | +| ltfield | Less Than Another Field | +| necsfield | Field Does Not Equal Another Field (relative) | +| nefield | Field Does Not Equal Another Field | + +### Network: + +| Tag | Description | +| - | - | +| cidr | Classless Inter-Domain Routing CIDR | +| cidrv4 | Classless Inter-Domain Routing CIDRv4 | +| cidrv6 | Classless Inter-Domain Routing CIDRv6 | +| datauri | Data URL | +| fqdn | Full Qualified Domain Name (FQDN) | +| hostname | Hostname RFC 952 | +| hostname_port | HostPort | +| hostname_rfc1123 | Hostname RFC 1123 | +| ip | Internet Protocol Address IP | +| ip4_addr | Internet Protocol Address IPv4 | +| ip6_addr |Internet Protocol Address IPv6 | +| ip_addr | Internet Protocol Address IP | +| ipv4 | Internet Protocol Address IPv4 | +| ipv6 | Internet Protocol Address IPv6 | +| mac | Media Access Control Address MAC | +| tcp4_addr | Transmission Control Protocol Address TCPv4 | +| tcp6_addr | Transmission Control Protocol 
Address TCPv6 | +| tcp_addr | Transmission Control Protocol Address TCP | +| udp4_addr | User Datagram Protocol Address UDPv4 | +| udp6_addr | User Datagram Protocol Address UDPv6 | +| udp_addr | User Datagram Protocol Address UDP | +| unix_addr | Unix domain socket end point Address | +| uri | URI String | +| url | URL String | +| url_encoded | URL Encoded | +| urn_rfc2141 | Urn RFC 2141 String | + +### Strings: + +| Tag | Description | +| - | - | +| alpha | Alpha Only | +| alphanum | Alphanumeric | +| alphanumunicode | Alphanumeric Unicode | +| alphaunicode | Alpha Unicode | +| ascii | ASCII | +| contains | Contains | +| containsany | Contains Any | +| containsrune | Contains Rune | +| endswith | Ends With | +| lowercase | Lowercase | +| multibyte | Multi-Byte Characters | +| number | NOT DOCUMENTED IN doc.go | +| numeric | Numeric | +| printascii | Printable ASCII | +| startswith | Starts With | +| uppercase | Uppercase | + +### Format: +| Tag | Description | +| - | - | +| base64 | Base64 String | +| base64url | Base64URL String | +| btc_addr | Bitcoin Address | +| btc_addr_bech32 | Bitcoin Bech32 Address (segwit) | +| datetime | Datetime | +| e164 | e164 formatted phone number | +| email | E-mail String +| eth_addr | Ethereum Address | +| hexadecimal | Hexadecimal String | +| hexcolor | Hexcolor String | +| hsl | HSL String | +| hsla | HSLA String | +| html | HTML Tags | +| html_encoded | HTML Encoded | +| isbn | International Standard Book Number | +| isbn10 | International Standard Book Number 10 | +| isbn13 | International Standard Book Number 13 | +| json | JSON | +| latitude | Latitude | +| longitude | Longitude | +| rgb | RGB String | +| rgba | RGBA String | +| ssn | Social Security Number SSN | +| uuid | Universally Unique Identifier UUID | +| uuid3 | Universally Unique Identifier UUID v3 | +| uuid3_rfc4122 | Universally Unique Identifier UUID v3 RFC4122 | +| uuid4 | Universally Unique Identifier UUID v4 | +| uuid4_rfc4122 | Universally Unique Identifier UUID v4 RFC4122 | +| uuid5 | Universally Unique Identifier UUID v5 | +| uuid5_rfc4122 | Universally Unique Identifier UUID v5 RFC4122 | +| uuid_rfc4122 | Universally Unique Identifier UUID RFC4122 | + +### Comparisons: +| Tag | Description | +| - | - | +| eq | Equals | +| gt | Greater than| +| gte |Greater than or equal | +| lt | Less Than | +| lte | Less Than or Equal | +| ne | Not Equal | + +### Other: +| Tag | Description | +| - | - | +| dir | Directory | +| endswith | Ends With | +| excludes | Excludes | +| excludesall | Excludes All | +| excludesrune | Excludes Rune | +| file | File path | +| isdefault | Is Default | +| len | Length | +| max | Maximum | +| min | Minimum | +| oneof | One Of | +| required | Required | +| required_if | Required If | +| required_unless | Required Unless | +| required_with | Required With | +| required_with_all | Required With All | +| required_without | Required Without | +| required_without_all | Required Without All | +| excluded_with | Excluded With | +| excluded_with_all | Excluded With All | +| excluded_without | Excluded Without | +| excluded_without_all | Excluded Without All | +| unique | Unique | + +Benchmarks +------ +###### Run on MacBook Pro (15-inch, 2017) go version go1.10.2 darwin/amd64 +```go +goos: darwin +goarch: amd64 +pkg: github.com/go-playground/validator +BenchmarkFieldSuccess-8 20000000 83.6 ns/op 0 B/op 0 allocs/op +BenchmarkFieldSuccessParallel-8 50000000 26.8 ns/op 0 B/op 0 allocs/op +BenchmarkFieldFailure-8 5000000 291 ns/op 208 B/op 4 allocs/op 
+BenchmarkFieldFailureParallel-8 20000000 107 ns/op 208 B/op 4 allocs/op +BenchmarkFieldArrayDiveSuccess-8 2000000 623 ns/op 201 B/op 11 allocs/op +BenchmarkFieldArrayDiveSuccessParallel-8 10000000 237 ns/op 201 B/op 11 allocs/op +BenchmarkFieldArrayDiveFailure-8 2000000 859 ns/op 412 B/op 16 allocs/op +BenchmarkFieldArrayDiveFailureParallel-8 5000000 335 ns/op 413 B/op 16 allocs/op +BenchmarkFieldMapDiveSuccess-8 1000000 1292 ns/op 432 B/op 18 allocs/op +BenchmarkFieldMapDiveSuccessParallel-8 3000000 467 ns/op 432 B/op 18 allocs/op +BenchmarkFieldMapDiveFailure-8 1000000 1082 ns/op 512 B/op 16 allocs/op +BenchmarkFieldMapDiveFailureParallel-8 5000000 425 ns/op 512 B/op 16 allocs/op +BenchmarkFieldMapDiveWithKeysSuccess-8 1000000 1539 ns/op 480 B/op 21 allocs/op +BenchmarkFieldMapDiveWithKeysSuccessParallel-8 3000000 613 ns/op 480 B/op 21 allocs/op +BenchmarkFieldMapDiveWithKeysFailure-8 1000000 1413 ns/op 721 B/op 21 allocs/op +BenchmarkFieldMapDiveWithKeysFailureParallel-8 3000000 575 ns/op 721 B/op 21 allocs/op +BenchmarkFieldCustomTypeSuccess-8 10000000 216 ns/op 32 B/op 2 allocs/op +BenchmarkFieldCustomTypeSuccessParallel-8 20000000 82.2 ns/op 32 B/op 2 allocs/op +BenchmarkFieldCustomTypeFailure-8 5000000 274 ns/op 208 B/op 4 allocs/op +BenchmarkFieldCustomTypeFailureParallel-8 20000000 116 ns/op 208 B/op 4 allocs/op +BenchmarkFieldOrTagSuccess-8 2000000 740 ns/op 16 B/op 1 allocs/op +BenchmarkFieldOrTagSuccessParallel-8 3000000 474 ns/op 16 B/op 1 allocs/op +BenchmarkFieldOrTagFailure-8 3000000 471 ns/op 224 B/op 5 allocs/op +BenchmarkFieldOrTagFailureParallel-8 3000000 414 ns/op 224 B/op 5 allocs/op +BenchmarkStructLevelValidationSuccess-8 10000000 213 ns/op 32 B/op 2 allocs/op +BenchmarkStructLevelValidationSuccessParallel-8 20000000 91.8 ns/op 32 B/op 2 allocs/op +BenchmarkStructLevelValidationFailure-8 3000000 473 ns/op 304 B/op 8 allocs/op +BenchmarkStructLevelValidationFailureParallel-8 10000000 234 ns/op 304 B/op 8 allocs/op +BenchmarkStructSimpleCustomTypeSuccess-8 5000000 385 ns/op 32 B/op 2 allocs/op +BenchmarkStructSimpleCustomTypeSuccessParallel-8 10000000 161 ns/op 32 B/op 2 allocs/op +BenchmarkStructSimpleCustomTypeFailure-8 2000000 640 ns/op 424 B/op 9 allocs/op +BenchmarkStructSimpleCustomTypeFailureParallel-8 5000000 318 ns/op 440 B/op 10 allocs/op +BenchmarkStructFilteredSuccess-8 2000000 597 ns/op 288 B/op 9 allocs/op +BenchmarkStructFilteredSuccessParallel-8 10000000 266 ns/op 288 B/op 9 allocs/op +BenchmarkStructFilteredFailure-8 3000000 454 ns/op 256 B/op 7 allocs/op +BenchmarkStructFilteredFailureParallel-8 10000000 214 ns/op 256 B/op 7 allocs/op +BenchmarkStructPartialSuccess-8 3000000 502 ns/op 256 B/op 6 allocs/op +BenchmarkStructPartialSuccessParallel-8 10000000 225 ns/op 256 B/op 6 allocs/op +BenchmarkStructPartialFailure-8 2000000 702 ns/op 480 B/op 11 allocs/op +BenchmarkStructPartialFailureParallel-8 5000000 329 ns/op 480 B/op 11 allocs/op +BenchmarkStructExceptSuccess-8 2000000 793 ns/op 496 B/op 12 allocs/op +BenchmarkStructExceptSuccessParallel-8 10000000 193 ns/op 240 B/op 5 allocs/op +BenchmarkStructExceptFailure-8 2000000 639 ns/op 464 B/op 10 allocs/op +BenchmarkStructExceptFailureParallel-8 5000000 300 ns/op 464 B/op 10 allocs/op +BenchmarkStructSimpleCrossFieldSuccess-8 3000000 417 ns/op 72 B/op 3 allocs/op +BenchmarkStructSimpleCrossFieldSuccessParallel-8 10000000 163 ns/op 72 B/op 3 allocs/op +BenchmarkStructSimpleCrossFieldFailure-8 2000000 645 ns/op 304 B/op 8 allocs/op +BenchmarkStructSimpleCrossFieldFailureParallel-8 5000000 285 ns/op 304 
B/op 8 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldSuccess-8 3000000 588 ns/op 80 B/op 4 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldSuccessParallel-8 10000000 221 ns/op 80 B/op 4 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldFailure-8 2000000 868 ns/op 320 B/op 9 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldFailureParallel-8 5000000 337 ns/op 320 B/op 9 allocs/op +BenchmarkStructSimpleSuccess-8 5000000 260 ns/op 0 B/op 0 allocs/op +BenchmarkStructSimpleSuccessParallel-8 20000000 90.6 ns/op 0 B/op 0 allocs/op +BenchmarkStructSimpleFailure-8 2000000 619 ns/op 424 B/op 9 allocs/op +BenchmarkStructSimpleFailureParallel-8 5000000 296 ns/op 424 B/op 9 allocs/op +BenchmarkStructComplexSuccess-8 1000000 1454 ns/op 128 B/op 8 allocs/op +BenchmarkStructComplexSuccessParallel-8 3000000 579 ns/op 128 B/op 8 allocs/op +BenchmarkStructComplexFailure-8 300000 4140 ns/op 3041 B/op 53 allocs/op +BenchmarkStructComplexFailureParallel-8 1000000 2127 ns/op 3041 B/op 53 allocs/op +BenchmarkOneof-8 10000000 140 ns/op 0 B/op 0 allocs/op +BenchmarkOneofParallel-8 20000000 70.1 ns/op 0 B/op 0 allocs/op +``` + +Complementary Software +---------------------- + +Here is a list of software that complements using this library either pre or post validation. + +* [form](https://github.com/go-playground/form) - Decodes url.Values into Go value(s) and Encodes Go value(s) into url.Values. Dual Array and Full map support. +* [mold](https://github.com/go-playground/mold) - A general library to help modify or set data within data structures and other objects + +How to Contribute +------ + +Make a pull request... + +License +------ +Distributed under MIT License, please see license file within the code for more details. diff --git a/vendor/github.com/go-playground/validator/v10/baked_in.go b/vendor/github.com/go-playground/validator/v10/baked_in.go new file mode 100644 index 000000000..6ce762d15 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/baked_in.go @@ -0,0 +1,2285 @@ +package validator + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "net" + "net/url" + "os" + "reflect" + "strconv" + "strings" + "sync" + "time" + "unicode/utf8" + + "golang.org/x/crypto/sha3" + + urn "github.com/leodido/go-urn" +) + +// Func accepts a FieldLevel interface for all validation needs. The return +// value should be true when validation succeeds. +type Func func(fl FieldLevel) bool + +// FuncCtx accepts a context.Context and FieldLevel interface for all +// validation needs. The return value should be true when validation succeeds. +type FuncCtx func(ctx context.Context, fl FieldLevel) bool + +// wrapFunc wraps noramal Func makes it compatible with FuncCtx +func wrapFunc(fn Func) FuncCtx { + if fn == nil { + return nil // be sure not to wrap a bad function. + } + return func(ctx context.Context, fl FieldLevel) bool { + return fn(fl) + } +} + +var ( + restrictedTags = map[string]struct{}{ + diveTag: {}, + keysTag: {}, + endKeysTag: {}, + structOnlyTag: {}, + omitempty: {}, + skipValidationTag: {}, + utf8HexComma: {}, + utf8Pipe: {}, + noStructLevelTag: {}, + requiredTag: {}, + isdefault: {}, + } + + // BakedInAliasValidators is a default mapping of a single validation tag that + // defines a common or complex set of validation(s) to simplify + // adding validation to structs. 
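Building on the error-handling note in the README above, here is a minimal sketch of the usual check-then-assert flow against v10; the `User` struct and its tags are invented for illustration:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/go-playground/validator/v10"
)

type User struct {
	Name  string `validate:"required"`
	Email string `validate:"required,email"`
	Age   uint8  `validate:"gte=0,lte=130"`
}

func main() {
	validate := validator.New()

	err := validate.Struct(User{Email: "not-an-email", Age: 200})
	if err != nil {
		// InvalidValidationError is returned only for bad input to Struct
		// (e.g. a nil or non-struct value), so it rarely needs handling.
		var invalid *validator.InvalidValidationError
		if errors.As(err, &invalid) {
			fmt.Println("invalid argument to Struct:", invalid)
			return
		}

		// Otherwise the error is a ValidationErrors slice of FieldError.
		for _, fe := range err.(validator.ValidationErrors) {
			fmt.Printf("field %s failed on the %q tag\n", fe.Namespace(), fe.Tag())
		}
	}
}
```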
+ bakedInAliases = map[string]string{ + "iscolor": "hexcolor|rgb|rgba|hsl|hsla", + "country_code": "iso3166_1_alpha2|iso3166_1_alpha3|iso3166_1_alpha_numeric", + } + + // BakedInValidators is the default map of ValidationFunc + // you can add, remove or even replace items to suite your needs, + // or even disregard and use your own map if so desired. + bakedInValidators = map[string]Func{ + "required": hasValue, + "required_if": requiredIf, + "required_unless": requiredUnless, + "required_with": requiredWith, + "required_with_all": requiredWithAll, + "required_without": requiredWithout, + "required_without_all": requiredWithoutAll, + "excluded_with": excludedWith, + "excluded_with_all": excludedWithAll, + "excluded_without": excludedWithout, + "excluded_without_all": excludedWithoutAll, + "isdefault": isDefault, + "len": hasLengthOf, + "min": hasMinOf, + "max": hasMaxOf, + "eq": isEq, + "ne": isNe, + "lt": isLt, + "lte": isLte, + "gt": isGt, + "gte": isGte, + "eqfield": isEqField, + "eqcsfield": isEqCrossStructField, + "necsfield": isNeCrossStructField, + "gtcsfield": isGtCrossStructField, + "gtecsfield": isGteCrossStructField, + "ltcsfield": isLtCrossStructField, + "ltecsfield": isLteCrossStructField, + "nefield": isNeField, + "gtefield": isGteField, + "gtfield": isGtField, + "ltefield": isLteField, + "ltfield": isLtField, + "fieldcontains": fieldContains, + "fieldexcludes": fieldExcludes, + "alpha": isAlpha, + "alphanum": isAlphanum, + "alphaunicode": isAlphaUnicode, + "alphanumunicode": isAlphanumUnicode, + "numeric": isNumeric, + "number": isNumber, + "hexadecimal": isHexadecimal, + "hexcolor": isHEXColor, + "rgb": isRGB, + "rgba": isRGBA, + "hsl": isHSL, + "hsla": isHSLA, + "e164": isE164, + "email": isEmail, + "url": isURL, + "uri": isURI, + "urn_rfc2141": isUrnRFC2141, // RFC 2141 + "file": isFile, + "base64": isBase64, + "base64url": isBase64URL, + "contains": contains, + "containsany": containsAny, + "containsrune": containsRune, + "excludes": excludes, + "excludesall": excludesAll, + "excludesrune": excludesRune, + "startswith": startsWith, + "endswith": endsWith, + "startsnotwith": startsNotWith, + "endsnotwith": endsNotWith, + "isbn": isISBN, + "isbn10": isISBN10, + "isbn13": isISBN13, + "eth_addr": isEthereumAddress, + "btc_addr": isBitcoinAddress, + "btc_addr_bech32": isBitcoinBech32Address, + "uuid": isUUID, + "uuid3": isUUID3, + "uuid4": isUUID4, + "uuid5": isUUID5, + "uuid_rfc4122": isUUIDRFC4122, + "uuid3_rfc4122": isUUID3RFC4122, + "uuid4_rfc4122": isUUID4RFC4122, + "uuid5_rfc4122": isUUID5RFC4122, + "ascii": isASCII, + "printascii": isPrintableASCII, + "multibyte": hasMultiByteCharacter, + "datauri": isDataURI, + "latitude": isLatitude, + "longitude": isLongitude, + "ssn": isSSN, + "ipv4": isIPv4, + "ipv6": isIPv6, + "ip": isIP, + "cidrv4": isCIDRv4, + "cidrv6": isCIDRv6, + "cidr": isCIDR, + "tcp4_addr": isTCP4AddrResolvable, + "tcp6_addr": isTCP6AddrResolvable, + "tcp_addr": isTCPAddrResolvable, + "udp4_addr": isUDP4AddrResolvable, + "udp6_addr": isUDP6AddrResolvable, + "udp_addr": isUDPAddrResolvable, + "ip4_addr": isIP4AddrResolvable, + "ip6_addr": isIP6AddrResolvable, + "ip_addr": isIPAddrResolvable, + "unix_addr": isUnixAddrResolvable, + "mac": isMAC, + "hostname": isHostnameRFC952, // RFC 952 + "hostname_rfc1123": isHostnameRFC1123, // RFC 1123 + "fqdn": isFQDN, + "unique": isUnique, + "oneof": isOneOf, + "html": isHTML, + "html_encoded": isHTMLEncoded, + "url_encoded": isURLEncoded, + "dir": isDir, + "json": isJSON, + "hostname_port": isHostnamePort, + 
"lowercase": isLowercase, + "uppercase": isUppercase, + "datetime": isDatetime, + "timezone": isTimeZone, + "iso3166_1_alpha2": isIso3166Alpha2, + "iso3166_1_alpha3": isIso3166Alpha3, + "iso3166_1_alpha_numeric": isIso3166AlphaNumeric, + } +) + +var oneofValsCache = map[string][]string{} +var oneofValsCacheRWLock = sync.RWMutex{} + +func parseOneOfParam2(s string) []string { + oneofValsCacheRWLock.RLock() + vals, ok := oneofValsCache[s] + oneofValsCacheRWLock.RUnlock() + if !ok { + oneofValsCacheRWLock.Lock() + vals = splitParamsRegex.FindAllString(s, -1) + for i := 0; i < len(vals); i++ { + vals[i] = strings.Replace(vals[i], "'", "", -1) + } + oneofValsCache[s] = vals + oneofValsCacheRWLock.Unlock() + } + return vals +} + +func isURLEncoded(fl FieldLevel) bool { + return uRLEncodedRegex.MatchString(fl.Field().String()) +} + +func isHTMLEncoded(fl FieldLevel) bool { + return hTMLEncodedRegex.MatchString(fl.Field().String()) +} + +func isHTML(fl FieldLevel) bool { + return hTMLRegex.MatchString(fl.Field().String()) +} + +func isOneOf(fl FieldLevel) bool { + vals := parseOneOfParam2(fl.Param()) + + field := fl.Field() + + var v string + switch field.Kind() { + case reflect.String: + v = field.String() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + v = strconv.FormatInt(field.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + v = strconv.FormatUint(field.Uint(), 10) + default: + panic(fmt.Sprintf("Bad field type %T", field.Interface())) + } + for i := 0; i < len(vals); i++ { + if vals[i] == v { + return true + } + } + return false +} + +// isUnique is the validation function for validating if each array|slice|map value is unique +func isUnique(fl FieldLevel) bool { + + field := fl.Field() + param := fl.Param() + v := reflect.ValueOf(struct{}{}) + + switch field.Kind() { + case reflect.Slice, reflect.Array: + elem := field.Type().Elem() + if elem.Kind() == reflect.Ptr { + elem = elem.Elem() + } + + if param == "" { + m := reflect.MakeMap(reflect.MapOf(elem, v.Type())) + + for i := 0; i < field.Len(); i++ { + m.SetMapIndex(reflect.Indirect(field.Index(i)), v) + } + return field.Len() == m.Len() + } + + sf, ok := elem.FieldByName(param) + if !ok { + panic(fmt.Sprintf("Bad field name %s", param)) + } + + sfTyp := sf.Type + if sfTyp.Kind() == reflect.Ptr { + sfTyp = sfTyp.Elem() + } + + m := reflect.MakeMap(reflect.MapOf(sfTyp, v.Type())) + for i := 0; i < field.Len(); i++ { + m.SetMapIndex(reflect.Indirect(reflect.Indirect(field.Index(i)).FieldByName(param)), v) + } + return field.Len() == m.Len() + case reflect.Map: + m := reflect.MakeMap(reflect.MapOf(field.Type().Elem(), v.Type())) + + for _, k := range field.MapKeys() { + m.SetMapIndex(field.MapIndex(k), v) + } + return field.Len() == m.Len() + default: + panic(fmt.Sprintf("Bad field type %T", field.Interface())) + } +} + +// IsMAC is the validation function for validating if the field's value is a valid MAC address. +func isMAC(fl FieldLevel) bool { + + _, err := net.ParseMAC(fl.Field().String()) + + return err == nil +} + +// IsCIDRv4 is the validation function for validating if the field's value is a valid v4 CIDR address. +func isCIDRv4(fl FieldLevel) bool { + + ip, _, err := net.ParseCIDR(fl.Field().String()) + + return err == nil && ip.To4() != nil +} + +// IsCIDRv6 is the validation function for validating if the field's value is a valid v6 CIDR address. 
+func isCIDRv6(fl FieldLevel) bool { + + ip, _, err := net.ParseCIDR(fl.Field().String()) + + return err == nil && ip.To4() == nil +} + +// IsCIDR is the validation function for validating if the field's value is a valid v4 or v6 CIDR address. +func isCIDR(fl FieldLevel) bool { + + _, _, err := net.ParseCIDR(fl.Field().String()) + + return err == nil +} + +// IsIPv4 is the validation function for validating if a value is a valid v4 IP address. +func isIPv4(fl FieldLevel) bool { + + ip := net.ParseIP(fl.Field().String()) + + return ip != nil && ip.To4() != nil +} + +// IsIPv6 is the validation function for validating if the field's value is a valid v6 IP address. +func isIPv6(fl FieldLevel) bool { + + ip := net.ParseIP(fl.Field().String()) + + return ip != nil && ip.To4() == nil +} + +// IsIP is the validation function for validating if the field's value is a valid v4 or v6 IP address. +func isIP(fl FieldLevel) bool { + + ip := net.ParseIP(fl.Field().String()) + + return ip != nil +} + +// IsSSN is the validation function for validating if the field's value is a valid SSN. +func isSSN(fl FieldLevel) bool { + + field := fl.Field() + + if field.Len() != 11 { + return false + } + + return sSNRegex.MatchString(field.String()) +} + +// IsLongitude is the validation function for validating if the field's value is a valid longitude coordinate. +func isLongitude(fl FieldLevel) bool { + field := fl.Field() + + var v string + switch field.Kind() { + case reflect.String: + v = field.String() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + v = strconv.FormatInt(field.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + v = strconv.FormatUint(field.Uint(), 10) + case reflect.Float32: + v = strconv.FormatFloat(field.Float(), 'f', -1, 32) + case reflect.Float64: + v = strconv.FormatFloat(field.Float(), 'f', -1, 64) + default: + panic(fmt.Sprintf("Bad field type %T", field.Interface())) + } + + return longitudeRegex.MatchString(v) +} + +// IsLatitude is the validation function for validating if the field's value is a valid latitude coordinate. +func isLatitude(fl FieldLevel) bool { + field := fl.Field() + + var v string + switch field.Kind() { + case reflect.String: + v = field.String() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + v = strconv.FormatInt(field.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + v = strconv.FormatUint(field.Uint(), 10) + case reflect.Float32: + v = strconv.FormatFloat(field.Float(), 'f', -1, 32) + case reflect.Float64: + v = strconv.FormatFloat(field.Float(), 'f', -1, 64) + default: + panic(fmt.Sprintf("Bad field type %T", field.Interface())) + } + + return latitudeRegex.MatchString(v) +} + +// IsDataURI is the validation function for validating if the field's value is a valid data URI. +func isDataURI(fl FieldLevel) bool { + + uri := strings.SplitN(fl.Field().String(), ",", 2) + + if len(uri) != 2 { + return false + } + + if !dataURIRegex.MatchString(uri[0]) { + return false + } + + return base64Regex.MatchString(uri[1]) +} + +// HasMultiByteCharacter is the validation function for validating if the field's value has a multi byte character. 
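For single values, the same baked-in network and coordinate checks can be exercised through `Var`; a small sketch (a nil error means the value passed):

```go
package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

func main() {
	validate := validator.New()

	fmt.Println(validate.Var("192.168.1.0/24", "cidrv4"))  // nil: valid v4 CIDR
	fmt.Println(validate.Var("2001:db8::/32", "cidrv6"))   // nil: valid v6 CIDR
	fmt.Println(validate.Var("00:1B:44:11:3A:B7", "mac"))  // nil: valid MAC
	fmt.Println(validate.Var("91", "latitude"))            // error: out of range
}
```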
+func hasMultiByteCharacter(fl FieldLevel) bool { + + field := fl.Field() + + if field.Len() == 0 { + return true + } + + return multibyteRegex.MatchString(field.String()) +} + +// IsPrintableASCII is the validation function for validating if the field's value is a valid printable ASCII character. +func isPrintableASCII(fl FieldLevel) bool { + return printableASCIIRegex.MatchString(fl.Field().String()) +} + +// IsASCII is the validation function for validating if the field's value is a valid ASCII character. +func isASCII(fl FieldLevel) bool { + return aSCIIRegex.MatchString(fl.Field().String()) +} + +// IsUUID5 is the validation function for validating if the field's value is a valid v5 UUID. +func isUUID5(fl FieldLevel) bool { + return uUID5Regex.MatchString(fl.Field().String()) +} + +// IsUUID4 is the validation function for validating if the field's value is a valid v4 UUID. +func isUUID4(fl FieldLevel) bool { + return uUID4Regex.MatchString(fl.Field().String()) +} + +// IsUUID3 is the validation function for validating if the field's value is a valid v3 UUID. +func isUUID3(fl FieldLevel) bool { + return uUID3Regex.MatchString(fl.Field().String()) +} + +// IsUUID is the validation function for validating if the field's value is a valid UUID of any version. +func isUUID(fl FieldLevel) bool { + return uUIDRegex.MatchString(fl.Field().String()) +} + +// IsUUID5RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v5 UUID. +func isUUID5RFC4122(fl FieldLevel) bool { + return uUID5RFC4122Regex.MatchString(fl.Field().String()) +} + +// IsUUID4RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v4 UUID. +func isUUID4RFC4122(fl FieldLevel) bool { + return uUID4RFC4122Regex.MatchString(fl.Field().String()) +} + +// IsUUID3RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v3 UUID. +func isUUID3RFC4122(fl FieldLevel) bool { + return uUID3RFC4122Regex.MatchString(fl.Field().String()) +} + +// IsUUIDRFC4122 is the validation function for validating if the field's value is a valid RFC4122 UUID of any version. +func isUUIDRFC4122(fl FieldLevel) bool { + return uUIDRFC4122Regex.MatchString(fl.Field().String()) +} + +// IsISBN is the validation function for validating if the field's value is a valid v10 or v13 ISBN. +func isISBN(fl FieldLevel) bool { + return isISBN10(fl) || isISBN13(fl) +} + +// IsISBN13 is the validation function for validating if the field's value is a valid v13 ISBN. +func isISBN13(fl FieldLevel) bool { + + s := strings.Replace(strings.Replace(fl.Field().String(), "-", "", 4), " ", "", 4) + + if !iSBN13Regex.MatchString(s) { + return false + } + + var checksum int32 + var i int32 + + factor := []int32{1, 3} + + for i = 0; i < 12; i++ { + checksum += factor[i%2] * int32(s[i]-'0') + } + + return (int32(s[12]-'0'))-((10-(checksum%10))%10) == 0 +} + +// IsISBN10 is the validation function for validating if the field's value is a valid v10 ISBN. +func isISBN10(fl FieldLevel) bool { + + s := strings.Replace(strings.Replace(fl.Field().String(), "-", "", 3), " ", "", 3) + + if !iSBN10Regex.MatchString(s) { + return false + } + + var checksum int32 + var i int32 + + for i = 0; i < 9; i++ { + checksum += (i + 1) * int32(s[i]-'0') + } + + if s[9] == 'X' { + checksum += 10 * 10 + } else { + checksum += 10 * int32(s[9]-'0') + } + + return checksum%11 == 0 +} + +// IsEthereumAddress is the validation function for validating if the field's value is a valid Ethereum address. 
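The ISBN-13 branch above weights the first twelve digits alternately by 1 and 3 and checks the thirteenth against that sum; a worked sketch using `Var` (the sample ISBN is a well-known published one):

```go
package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

func main() {
	validate := validator.New()

	// For 978-0-306-40615-7:
	//   9 + 3*7 + 8 + 3*0 + 3 + 3*0 + 6 + 3*4 + 0 + 3*6 + 1 + 3*5 = 93,
	//   and (10 - 93%10) % 10 = 7, which matches the final digit.
	fmt.Println(validate.Var("9780306406157", "isbn13")) // nil: checksum matches
	fmt.Println(validate.Var("9780306406150", "isbn13")) // error: bad check digit
}
```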
+func isEthereumAddress(fl FieldLevel) bool { + address := fl.Field().String() + + if !ethAddressRegex.MatchString(address) { + return false + } + + if ethaddressRegexUpper.MatchString(address) || ethAddressRegexLower.MatchString(address) { + return true + } + + // Checksum validation. Reference: https://github.com/ethereum/EIPs/blob/master/EIPS/eip-55.md + address = address[2:] // Skip "0x" prefix. + h := sha3.NewLegacyKeccak256() + // hash.Hash's io.Writer implementation says it never returns an error. https://golang.org/pkg/hash/#Hash + _, _ = h.Write([]byte(strings.ToLower(address))) + hash := hex.EncodeToString(h.Sum(nil)) + + for i := 0; i < len(address); i++ { + if address[i] <= '9' { // Skip 0-9 digits: they don't have upper/lower-case. + continue + } + if hash[i] > '7' && address[i] >= 'a' || hash[i] <= '7' && address[i] <= 'F' { + return false + } + } + + return true +} + +// IsBitcoinAddress is the validation function for validating if the field's value is a valid btc address +func isBitcoinAddress(fl FieldLevel) bool { + address := fl.Field().String() + + if !btcAddressRegex.MatchString(address) { + return false + } + + alphabet := []byte("123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz") + + decode := [25]byte{} + + for _, n := range []byte(address) { + d := bytes.IndexByte(alphabet, n) + + for i := 24; i >= 0; i-- { + d += 58 * int(decode[i]) + decode[i] = byte(d % 256) + d /= 256 + } + } + + h := sha256.New() + _, _ = h.Write(decode[:21]) + d := h.Sum([]byte{}) + h = sha256.New() + _, _ = h.Write(d) + + validchecksum := [4]byte{} + computedchecksum := [4]byte{} + + copy(computedchecksum[:], h.Sum(d[:0])) + copy(validchecksum[:], decode[21:]) + + return validchecksum == computedchecksum +} + +// IsBitcoinBech32Address is the validation function for validating if the field's value is a valid bech32 btc address +func isBitcoinBech32Address(fl FieldLevel) bool { + address := fl.Field().String() + + if !btcLowerAddressRegexBech32.MatchString(address) && !btcUpperAddressRegexBech32.MatchString(address) { + return false + } + + am := len(address) % 8 + + if am == 0 || am == 3 || am == 5 { + return false + } + + address = strings.ToLower(address) + + alphabet := "qpzry9x8gf2tvdw0s3jn54khce6mua7l" + + hr := []int{3, 3, 0, 2, 3} // the human readable part will always be bc + addr := address[3:] + dp := make([]int, 0, len(addr)) + + for _, c := range addr { + dp = append(dp, strings.IndexRune(alphabet, c)) + } + + ver := dp[0] + + if ver < 0 || ver > 16 { + return false + } + + if ver == 0 { + if len(address) != 42 && len(address) != 62 { + return false + } + } + + values := append(hr, dp...) + + GEN := []int{0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3} + + p := 1 + + for _, v := range values { + b := p >> 25 + p = (p&0x1ffffff)<<5 ^ v + + for i := 0; i < 5; i++ { + if (b>>uint(i))&1 == 1 { + p ^= GEN[i] + } + } + } + + if p != 1 { + return false + } + + b := uint(0) + acc := 0 + mv := (1 << 5) - 1 + var sw []int + + for _, v := range dp[1 : len(dp)-6] { + acc = (acc << 5) | v + b += 5 + for b >= 8 { + b -= 8 + sw = append(sw, (acc>>b)&mv) + } + } + + if len(sw) < 2 || len(sw) > 40 { + return false + } + + return true +} + +// ExcludesRune is the validation function for validating that the field's value does not contain the rune specified within the param. 
+func excludesRune(fl FieldLevel) bool { + return !containsRune(fl) +} + +// ExcludesAll is the validation function for validating that the field's value does not contain any of the characters specified within the param. +func excludesAll(fl FieldLevel) bool { + return !containsAny(fl) +} + +// Excludes is the validation function for validating that the field's value does not contain the text specified within the param. +func excludes(fl FieldLevel) bool { + return !contains(fl) +} + +// ContainsRune is the validation function for validating that the field's value contains the rune specified within the param. +func containsRune(fl FieldLevel) bool { + + r, _ := utf8.DecodeRuneInString(fl.Param()) + + return strings.ContainsRune(fl.Field().String(), r) +} + +// ContainsAny is the validation function for validating that the field's value contains any of the characters specified within the param. +func containsAny(fl FieldLevel) bool { + return strings.ContainsAny(fl.Field().String(), fl.Param()) +} + +// Contains is the validation function for validating that the field's value contains the text specified within the param. +func contains(fl FieldLevel) bool { + return strings.Contains(fl.Field().String(), fl.Param()) +} + +// StartsWith is the validation function for validating that the field's value starts with the text specified within the param. +func startsWith(fl FieldLevel) bool { + return strings.HasPrefix(fl.Field().String(), fl.Param()) +} + +// EndsWith is the validation function for validating that the field's value ends with the text specified within the param. +func endsWith(fl FieldLevel) bool { + return strings.HasSuffix(fl.Field().String(), fl.Param()) +} + +// StartsNotWith is the validation function for validating that the field's value does not start with the text specified within the param. +func startsNotWith(fl FieldLevel) bool { + return !startsWith(fl) +} + +// EndsNotWith is the validation function for validating that the field's value does not end with the text specified within the param. +func endsNotWith(fl FieldLevel) bool { + return !endsWith(fl) +} + +// FieldContains is the validation function for validating if the current field's value contains the field specified by the param's value. +func fieldContains(fl FieldLevel) bool { + field := fl.Field() + + currentField, _, ok := fl.GetStructFieldOK() + + if !ok { + return false + } + + return strings.Contains(field.String(), currentField.String()) +} + +// FieldExcludes is the validation function for validating if the current field's value excludes the field specified by the param's value. +func fieldExcludes(fl FieldLevel) bool { + field := fl.Field() + + currentField, _, ok := fl.GetStructFieldOK() + if !ok { + return true + } + + return !strings.Contains(field.String(), currentField.String()) +} + +// IsNeField is the validation function for validating if the current field's value is not equal to the field specified by the param's value. 
+func isNeField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + currentField, currentKind, ok := fl.GetStructFieldOK() + + if !ok || currentKind != kind { + return true + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() != currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() != currentField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() != currentField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) != int64(currentField.Len()) + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return true + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return !fieldTime.Equal(t) + } + + } + + // default reflect.String: + return field.String() != currentField.String() +} + +// IsNe is the validation function for validating that the field's value does not equal the provided param value. +func isNe(fl FieldLevel) bool { + return !isEq(fl) +} + +// IsLteCrossStructField is the validation function for validating if the current field's value is less than or equal to the field, within a separate struct, specified by the param's value. +func isLteCrossStructField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + topField, topKind, ok := fl.GetStructFieldOK() + if !ok || topKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() <= topField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() <= topField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() <= topField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) <= int64(topField.Len()) + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return false + } + + if fieldType == timeType { + + fieldTime := field.Interface().(time.Time) + topTime := topField.Interface().(time.Time) + + return fieldTime.Before(topTime) || fieldTime.Equal(topTime) + } + } + + // default reflect.String: + return field.String() <= topField.String() +} + +// IsLtCrossStructField is the validation function for validating if the current field's value is less than the field, within a separate struct, specified by the param's value. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
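The field-comparison family (eqfield, nefield, gtfield and the cross-struct variants above) is normally driven from struct tags; a hedged sketch with an invented `Booking` struct:

```go
package main

import (
	"fmt"
	"time"

	"github.com/go-playground/validator/v10"
)

type Booking struct {
	Start time.Time `validate:"required"`
	// End must be strictly after Start; time.Time fields are compared
	// with Before/After/Equal, as in the functions above.
	End time.Time `validate:"required,gtfield=Start"`

	Password        string `validate:"required"`
	ConfirmPassword string `validate:"eqfield=Password"`
}

func main() {
	validate := validator.New()

	b := Booking{
		Start:           time.Now(),
		End:             time.Now().Add(-time.Hour), // violates gtfield=Start
		Password:        "s3cret",
		ConfirmPassword: "typo", // violates eqfield=Password
	}
	fmt.Println(validate.Struct(b))
}
```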
+func isLtCrossStructField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + topField, topKind, ok := fl.GetStructFieldOK() + if !ok || topKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() < topField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() < topField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() < topField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) < int64(topField.Len()) + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return false + } + + if fieldType == timeType { + + fieldTime := field.Interface().(time.Time) + topTime := topField.Interface().(time.Time) + + return fieldTime.Before(topTime) + } + } + + // default reflect.String: + return field.String() < topField.String() +} + +// IsGteCrossStructField is the validation function for validating if the current field's value is greater than or equal to the field, within a separate struct, specified by the param's value. +func isGteCrossStructField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + topField, topKind, ok := fl.GetStructFieldOK() + if !ok || topKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() >= topField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() >= topField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() >= topField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) >= int64(topField.Len()) + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return false + } + + if fieldType == timeType { + + fieldTime := field.Interface().(time.Time) + topTime := topField.Interface().(time.Time) + + return fieldTime.After(topTime) || fieldTime.Equal(topTime) + } + } + + // default reflect.String: + return field.String() >= topField.String() +} + +// IsGtCrossStructField is the validation function for validating if the current field's value is greater than the field, within a separate struct, specified by the param's value. +func isGtCrossStructField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + topField, topKind, ok := fl.GetStructFieldOK() + if !ok || topKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() > topField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() > topField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() > topField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) > int64(topField.Len()) + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. 
struct and time + if fieldType != topField.Type() { + return false + } + + if fieldType == timeType { + + fieldTime := field.Interface().(time.Time) + topTime := topField.Interface().(time.Time) + + return fieldTime.After(topTime) + } + } + + // default reflect.String: + return field.String() > topField.String() +} + +// IsNeCrossStructField is the validation function for validating that the current field's value is not equal to the field, within a separate struct, specified by the param's value. +func isNeCrossStructField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + topField, currentKind, ok := fl.GetStructFieldOK() + if !ok || currentKind != kind { + return true + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return topField.Int() != field.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return topField.Uint() != field.Uint() + + case reflect.Float32, reflect.Float64: + return topField.Float() != field.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(topField.Len()) != int64(field.Len()) + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return true + } + + if fieldType == timeType { + + t := field.Interface().(time.Time) + fieldTime := topField.Interface().(time.Time) + + return !fieldTime.Equal(t) + } + } + + // default reflect.String: + return topField.String() != field.String() +} + +// IsEqCrossStructField is the validation function for validating that the current field's value is equal to the field, within a separate struct, specified by the param's value. +func isEqCrossStructField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + topField, topKind, ok := fl.GetStructFieldOK() + if !ok || topKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return topField.Int() == field.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return topField.Uint() == field.Uint() + + case reflect.Float32, reflect.Float64: + return topField.Float() == field.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(topField.Len()) == int64(field.Len()) + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return false + } + + if fieldType == timeType { + + t := field.Interface().(time.Time) + fieldTime := topField.Interface().(time.Time) + + return fieldTime.Equal(t) + } + } + + // default reflect.String: + return topField.String() == field.String() +} + +// IsEqField is the validation function for validating if the current field's value is equal to the field specified by the param's value. 
+func isEqField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + currentField, currentKind, ok := fl.GetStructFieldOK() + if !ok || currentKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() == currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() == currentField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() == currentField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) == int64(currentField.Len()) + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return false + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return fieldTime.Equal(t) + } + + } + + // default reflect.String: + return field.String() == currentField.String() +} + +// IsEq is the validation function for validating if the current field's value is equal to the param's value. +func isEq(fl FieldLevel) bool { + + field := fl.Field() + param := fl.Param() + + switch field.Kind() { + + case reflect.String: + return field.String() == param + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) == p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asIntFromType(field.Type(), param) + + return field.Int() == p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() == p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() == p + + case reflect.Bool: + p := asBool(param) + + return field.Bool() == p + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// IsBase64 is the validation function for validating if the current field's value is a valid base 64. +func isBase64(fl FieldLevel) bool { + return base64Regex.MatchString(fl.Field().String()) +} + +// IsBase64URL is the validation function for validating if the current field's value is a valid base64 URL safe string. +func isBase64URL(fl FieldLevel) bool { + return base64URLRegex.MatchString(fl.Field().String()) +} + +// IsURI is the validation function for validating if the current field's value is a valid URI. +func isURI(fl FieldLevel) bool { + + field := fl.Field() + + switch field.Kind() { + + case reflect.String: + + s := field.String() + + // checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195 + // emulate browser and strip the '#' suffix prior to validation. see issue-#237 + if i := strings.Index(s, "#"); i > -1 { + s = s[:i] + } + + if len(s) == 0 { + return false + } + + _, err := url.ParseRequestURI(s) + + return err == nil + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// IsURL is the validation function for validating if the current field's value is a valid URL. 
+func isURL(fl FieldLevel) bool { + + field := fl.Field() + + switch field.Kind() { + + case reflect.String: + + var i int + s := field.String() + + // checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195 + // emulate browser and strip the '#' suffix prior to validation. see issue-#237 + if i = strings.Index(s, "#"); i > -1 { + s = s[:i] + } + + if len(s) == 0 { + return false + } + + url, err := url.ParseRequestURI(s) + + if err != nil || url.Scheme == "" { + return false + } + + return true + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isUrnRFC2141 is the validation function for validating if the current field's value is a valid URN as per RFC 2141. +func isUrnRFC2141(fl FieldLevel) bool { + field := fl.Field() + + switch field.Kind() { + + case reflect.String: + + str := field.String() + + _, match := urn.Parse([]byte(str)) + + return match + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// IsFile is the validation function for validating if the current field's value is a valid file path. +func isFile(fl FieldLevel) bool { + field := fl.Field() + + switch field.Kind() { + case reflect.String: + fileInfo, err := os.Stat(field.String()) + if err != nil { + return false + } + + return !fileInfo.IsDir() + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// IsE164 is the validation function for validating if the current field's value is a valid e.164 formatted phone number. +func isE164(fl FieldLevel) bool { + return e164Regex.MatchString(fl.Field().String()) +} + +// IsEmail is the validation function for validating if the current field's value is a valid email address. +func isEmail(fl FieldLevel) bool { + return emailRegex.MatchString(fl.Field().String()) +} + +// IsHSLA is the validation function for validating if the current field's value is a valid HSLA color. +func isHSLA(fl FieldLevel) bool { + return hslaRegex.MatchString(fl.Field().String()) +} + +// IsHSL is the validation function for validating if the current field's value is a valid HSL color. +func isHSL(fl FieldLevel) bool { + return hslRegex.MatchString(fl.Field().String()) +} + +// IsRGBA is the validation function for validating if the current field's value is a valid RGBA color. +func isRGBA(fl FieldLevel) bool { + return rgbaRegex.MatchString(fl.Field().String()) +} + +// IsRGB is the validation function for validating if the current field's value is a valid RGB color. +func isRGB(fl FieldLevel) bool { + return rgbRegex.MatchString(fl.Field().String()) +} + +// IsHEXColor is the validation function for validating if the current field's value is a valid HEX color. +func isHEXColor(fl FieldLevel) bool { + return hexcolorRegex.MatchString(fl.Field().String()) +} + +// IsHexadecimal is the validation function for validating if the current field's value is a valid hexadecimal. +func isHexadecimal(fl FieldLevel) bool { + return hexadecimalRegex.MatchString(fl.Field().String()) +} + +// IsNumber is the validation function for validating if the current field's value is a valid number. 
+func isNumber(fl FieldLevel) bool { + switch fl.Field().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64: + return true + default: + return numberRegex.MatchString(fl.Field().String()) + } +} + +// IsNumeric is the validation function for validating if the current field's value is a valid numeric value. +func isNumeric(fl FieldLevel) bool { + switch fl.Field().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64: + return true + default: + return numericRegex.MatchString(fl.Field().String()) + } +} + +// IsAlphanum is the validation function for validating if the current field's value is a valid alphanumeric value. +func isAlphanum(fl FieldLevel) bool { + return alphaNumericRegex.MatchString(fl.Field().String()) +} + +// IsAlpha is the validation function for validating if the current field's value is a valid alpha value. +func isAlpha(fl FieldLevel) bool { + return alphaRegex.MatchString(fl.Field().String()) +} + +// IsAlphanumUnicode is the validation function for validating if the current field's value is a valid alphanumeric unicode value. +func isAlphanumUnicode(fl FieldLevel) bool { + return alphaUnicodeNumericRegex.MatchString(fl.Field().String()) +} + +// IsAlphaUnicode is the validation function for validating if the current field's value is a valid alpha unicode value. +func isAlphaUnicode(fl FieldLevel) bool { + return alphaUnicodeRegex.MatchString(fl.Field().String()) +} + +// isDefault is the opposite of required aka hasValue +func isDefault(fl FieldLevel) bool { + return !hasValue(fl) +} + +// HasValue is the validation function for validating if the current field's value is not the default static value. 
+func hasValue(fl FieldLevel) bool { + field := fl.Field() + switch field.Kind() { + case reflect.Slice, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func: + return !field.IsNil() + default: + if fl.(*validate).fldIsPointer && field.Interface() != nil { + return true + } + return field.IsValid() && field.Interface() != reflect.Zero(field.Type()).Interface() + } +} + +// requireCheckField is a func for check field kind +func requireCheckFieldKind(fl FieldLevel, param string, defaultNotFoundValue bool) bool { + field := fl.Field() + kind := field.Kind() + var nullable, found bool + if len(param) > 0 { + field, kind, nullable, found = fl.GetStructFieldOKAdvanced2(fl.Parent(), param) + if !found { + return defaultNotFoundValue + } + } + switch kind { + case reflect.Invalid: + return defaultNotFoundValue + case reflect.Slice, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func: + return field.IsNil() + default: + if nullable && field.Interface() != nil { + return false + } + return field.IsValid() && field.Interface() == reflect.Zero(field.Type()).Interface() + } +} + +// requireCheckFieldValue is a func for check field value +func requireCheckFieldValue(fl FieldLevel, param string, value string, defaultNotFoundValue bool) bool { + field, kind, _, found := fl.GetStructFieldOKAdvanced2(fl.Parent(), param) + if !found { + return defaultNotFoundValue + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() == asInt(value) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() == asUint(value) + + case reflect.Float32, reflect.Float64: + return field.Float() == asFloat(value) + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) == asInt(value) + } + + // default reflect.String: + return field.String() == value +} + +// requiredIf is the validation function +// The field under validation must be present and not empty only if all the other specified fields are equal to the value following with the specified field. +func requiredIf(fl FieldLevel) bool { + params := parseOneOfParam2(fl.Param()) + if len(params)%2 != 0 { + panic(fmt.Sprintf("Bad param number for required_if %s", fl.FieldName())) + } + for i := 0; i < len(params); i += 2 { + if !requireCheckFieldValue(fl, params[i], params[i+1], false) { + return true + } + } + return hasValue(fl) +} + +// requiredUnless is the validation function +// The field under validation must be present and not empty only unless all the other specified fields are equal to the value following with the specified field. +func requiredUnless(fl FieldLevel) bool { + params := parseOneOfParam2(fl.Param()) + if len(params)%2 != 0 { + panic(fmt.Sprintf("Bad param number for required_unless %s", fl.FieldName())) + } + + for i := 0; i < len(params); i += 2 { + if requireCheckFieldValue(fl, params[i], params[i+1], false) { + return true + } + } + return hasValue(fl) +} + +// ExcludedWith is the validation function +// The field under validation must not be present or is empty if any of the other specified fields are present. 
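`required_if` and `required_unless` above read their param as space-separated field/value pairs checked pairwise; a small sketch with an invented `Contact` struct:

```go
package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

type Contact struct {
	Method string `validate:"required,oneof=email phone"`
	// Email is required only when Method == "email".
	Email string `validate:"required_if=Method email,omitempty,email"`
	// Phone is required unless Method == "email".
	Phone string `validate:"required_unless=Method email"`
}

func main() {
	validate := validator.New()

	// Fails: Method is "email" but Email is empty.
	fmt.Println(validate.Struct(Contact{Method: "email"}))

	// Passes: Method is "phone" and Phone is set.
	fmt.Println(validate.Struct(Contact{Method: "phone", Phone: "+15551234567"}))
}
```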
+func excludedWith(fl FieldLevel) bool { + params := parseOneOfParam2(fl.Param()) + for _, param := range params { + if !requireCheckFieldKind(fl, param, true) { + return !hasValue(fl) + } + } + return true +} + +// RequiredWith is the validation function +// The field under validation must be present and not empty only if any of the other specified fields are present. +func requiredWith(fl FieldLevel) bool { + params := parseOneOfParam2(fl.Param()) + for _, param := range params { + if !requireCheckFieldKind(fl, param, true) { + return hasValue(fl) + } + } + return true +} + +// ExcludedWithAll is the validation function +// The field under validation must not be present or is empty if all of the other specified fields are present. +func excludedWithAll(fl FieldLevel) bool { + params := parseOneOfParam2(fl.Param()) + for _, param := range params { + if requireCheckFieldKind(fl, param, true) { + return true + } + } + return !hasValue(fl) +} + +// RequiredWithAll is the validation function +// The field under validation must be present and not empty only if all of the other specified fields are present. +func requiredWithAll(fl FieldLevel) bool { + params := parseOneOfParam2(fl.Param()) + for _, param := range params { + if requireCheckFieldKind(fl, param, true) { + return true + } + } + return hasValue(fl) +} + +// ExcludedWithout is the validation function +// The field under validation must not be present or is empty when any of the other specified fields are not present. +func excludedWithout(fl FieldLevel) bool { + if requireCheckFieldKind(fl, strings.TrimSpace(fl.Param()), true) { + return !hasValue(fl) + } + return true +} + +// RequiredWithout is the validation function +// The field under validation must be present and not empty only when any of the other specified fields are not present. +func requiredWithout(fl FieldLevel) bool { + if requireCheckFieldKind(fl, strings.TrimSpace(fl.Param()), true) { + return hasValue(fl) + } + return true +} + +// ExcludedWithoutAll is the validation function +// The field under validation must not be present or is empty when all of the other specified fields are not present. +func excludedWithoutAll(fl FieldLevel) bool { + params := parseOneOfParam2(fl.Param()) + for _, param := range params { + if !requireCheckFieldKind(fl, param, true) { + return true + } + } + return !hasValue(fl) +} + +// RequiredWithoutAll is the validation function +// The field under validation must be present and not empty only when all of the other specified fields are not present. +func requiredWithoutAll(fl FieldLevel) bool { + params := parseOneOfParam2(fl.Param()) + for _, param := range params { + if !requireCheckFieldKind(fl, param, true) { + return true + } + } + return hasValue(fl) +} + +// IsGteField is the validation function for validating if the current field's value is greater than or equal to the field specified by the param's value.
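The conditional helpers above all combine requireCheckFieldKind with hasValue; a hedged usage sketch follows, where the Payment struct and its field names are invented and the tag names are the ones these functions are registered under:

    package main

    import (
        "fmt"

        "github.com/go-playground/validator/v10"
    )

    // Payment is a made-up struct used only to exercise the conditional tags.
    type Payment struct {
        CardNumber string
        CVV        string `validate:"required_with=CardNumber"`    // needed whenever CardNumber is set
        IBAN       string `validate:"excluded_with=CardNumber"`    // must stay empty when CardNumber is set
        Reference  string `validate:"required_without=CardNumber"` // fallback identifier when no card is given
    }

    func main() {
        validate := validator.New()

        // CardNumber present, CVV missing and IBAN set: required_with and excluded_with both fail.
        fmt.Println(validate.Struct(Payment{CardNumber: "4111111111111111", IBAN: "DE89"}))

        // No card at all: only required_without on Reference applies, and it is satisfied.
        fmt.Println(validate.Struct(Payment{Reference: "order-42"}))
    }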
+func isGteField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + currentField, currentKind, ok := fl.GetStructFieldOK() + if !ok || currentKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + + return field.Int() >= currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + + return field.Uint() >= currentField.Uint() + + case reflect.Float32, reflect.Float64: + + return field.Float() >= currentField.Float() + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return false + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return fieldTime.After(t) || fieldTime.Equal(t) + } + } + + // default reflect.String + return len(field.String()) >= len(currentField.String()) +} + +// IsGtField is the validation function for validating if the current field's value is greater than the field specified by the param's value. +func isGtField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + currentField, currentKind, ok := fl.GetStructFieldOK() + if !ok || currentKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + + return field.Int() > currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + + return field.Uint() > currentField.Uint() + + case reflect.Float32, reflect.Float64: + + return field.Float() > currentField.Float() + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return false + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return fieldTime.After(t) + } + } + + // default reflect.String + return len(field.String()) > len(currentField.String()) +} + +// IsGte is the validation function for validating if the current field's value is greater than or equal to the param's value. +func isGte(fl FieldLevel) bool { + + field := fl.Field() + param := fl.Param() + + switch field.Kind() { + + case reflect.String: + p := asInt(param) + + return int64(utf8.RuneCountInString(field.String())) >= p + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) >= p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asIntFromType(field.Type(), param) + + return field.Int() >= p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() >= p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() >= p + + case reflect.Struct: + + if field.Type() == timeType { + + now := time.Now().UTC() + t := field.Interface().(time.Time) + + return t.After(now) || t.Equal(now) + } + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// IsGt is the validation function for validating if the current field's value is greater than the param's value. 
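isGtField and isGteField (together with their lt/lte counterparts further down) back the cross-field tags; a hedged sketch with a hypothetical Booking struct, exercising the time.Time branch shown above:

    package main

    import (
        "fmt"
        "time"

        "github.com/go-playground/validator/v10"
    )

    // Booking is a hypothetical struct: End must be strictly after Start.
    type Booking struct {
        Start time.Time
        End   time.Time `validate:"gtfield=Start"`
    }

    func main() {
        validate := validator.New()

        now := time.Now()
        fmt.Println(validate.Struct(Booking{Start: now, End: now.Add(time.Hour)}))  // passes
        fmt.Println(validate.Struct(Booking{Start: now, End: now.Add(-time.Hour)})) // gtfield failure

        // The same comparison is available without a struct via VarWithValue.
        fmt.Println(validate.VarWithValue(now.Add(time.Hour), now, "gtfield"))
    }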
+func isGt(fl FieldLevel) bool { + + field := fl.Field() + param := fl.Param() + + switch field.Kind() { + + case reflect.String: + p := asInt(param) + + return int64(utf8.RuneCountInString(field.String())) > p + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) > p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asIntFromType(field.Type(), param) + + return field.Int() > p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() > p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() > p + case reflect.Struct: + + if field.Type() == timeType { + + return field.Interface().(time.Time).After(time.Now().UTC()) + } + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// HasLengthOf is the validation function for validating if the current field's value is equal to the param's value. +func hasLengthOf(fl FieldLevel) bool { + + field := fl.Field() + param := fl.Param() + + switch field.Kind() { + + case reflect.String: + p := asInt(param) + + return int64(utf8.RuneCountInString(field.String())) == p + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) == p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asIntFromType(field.Type(), param) + + return field.Int() == p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() == p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() == p + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// HasMinOf is the validation function for validating if the current field's value is greater than or equal to the param's value. +func hasMinOf(fl FieldLevel) bool { + return isGte(fl) +} + +// IsLteField is the validation function for validating if the current field's value is less than or equal to the field specified by the param's value. +func isLteField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + currentField, currentKind, ok := fl.GetStructFieldOK() + if !ok || currentKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + + return field.Int() <= currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + + return field.Uint() <= currentField.Uint() + + case reflect.Float32, reflect.Float64: + + return field.Float() <= currentField.Float() + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return false + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return fieldTime.Before(t) || fieldTime.Equal(t) + } + } + + // default reflect.String + return len(field.String()) <= len(currentField.String()) +} + +// IsLtField is the validation function for validating if the current field's value is less than the field specified by the param's value. 
+func isLtField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + currentField, currentKind, ok := fl.GetStructFieldOK() + if !ok || currentKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + + return field.Int() < currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + + return field.Uint() < currentField.Uint() + + case reflect.Float32, reflect.Float64: + + return field.Float() < currentField.Float() + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return false + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return fieldTime.Before(t) + } + } + + // default reflect.String + return len(field.String()) < len(currentField.String()) +} + +// IsLte is the validation function for validating if the current field's value is less than or equal to the param's value. +func isLte(fl FieldLevel) bool { + + field := fl.Field() + param := fl.Param() + + switch field.Kind() { + + case reflect.String: + p := asInt(param) + + return int64(utf8.RuneCountInString(field.String())) <= p + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) <= p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asIntFromType(field.Type(), param) + + return field.Int() <= p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() <= p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() <= p + + case reflect.Struct: + + if field.Type() == timeType { + + now := time.Now().UTC() + t := field.Interface().(time.Time) + + return t.Before(now) || t.Equal(now) + } + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// IsLt is the validation function for validating if the current field's value is less than the param's value. +func isLt(fl FieldLevel) bool { + + field := fl.Field() + param := fl.Param() + + switch field.Kind() { + + case reflect.String: + p := asInt(param) + + return int64(utf8.RuneCountInString(field.String())) < p + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) < p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asIntFromType(field.Type(), param) + + return field.Int() < p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() < p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() < p + + case reflect.Struct: + + if field.Type() == timeType { + + return field.Interface().(time.Time).Before(time.Now().UTC()) + } + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// HasMaxOf is the validation function for validating if the current field's value is less than or equal to the param's value. +func hasMaxOf(fl FieldLevel) bool { + return isLte(fl) +} + +// IsTCP4AddrResolvable is the validation function for validating if the field's value is a resolvable tcp4 address. 
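A hedged sketch of the parameter-driven comparisons in this block (gt/gte/lt/lte and the len/min/max aliases); as the switch statements above show, the parameter is interpreted per kind: rune count for strings, element count for slices and maps, the numeric value itself for numbers. The values are illustrative only:

    package main

    import (
        "fmt"

        "github.com/go-playground/validator/v10"
    )

    func main() {
        validate := validator.New()

        fmt.Println(validate.Var(42, "gt=10,lte=100"))           // passes: 10 < 42 <= 100
        fmt.Println(validate.Var("héllo", "len=5"))              // passes: 5 runes, not bytes
        fmt.Println(validate.Var([]int{1, 2, 3}, "min=2,max=5")) // passes: 3 elements
        fmt.Println(validate.Var("ab", "min=3"))                 // fails: only 2 characters
    }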
+func isTCP4AddrResolvable(fl FieldLevel) bool { + + if !isIP4Addr(fl) { + return false + } + + _, err := net.ResolveTCPAddr("tcp4", fl.Field().String()) + return err == nil +} + +// IsTCP6AddrResolvable is the validation function for validating if the field's value is a resolvable tcp6 address. +func isTCP6AddrResolvable(fl FieldLevel) bool { + + if !isIP6Addr(fl) { + return false + } + + _, err := net.ResolveTCPAddr("tcp6", fl.Field().String()) + + return err == nil +} + +// IsTCPAddrResolvable is the validation function for validating if the field's value is a resolvable tcp address. +func isTCPAddrResolvable(fl FieldLevel) bool { + + if !isIP4Addr(fl) && !isIP6Addr(fl) { + return false + } + + _, err := net.ResolveTCPAddr("tcp", fl.Field().String()) + + return err == nil +} + +// IsUDP4AddrResolvable is the validation function for validating if the field's value is a resolvable udp4 address. +func isUDP4AddrResolvable(fl FieldLevel) bool { + + if !isIP4Addr(fl) { + return false + } + + _, err := net.ResolveUDPAddr("udp4", fl.Field().String()) + + return err == nil +} + +// IsUDP6AddrResolvable is the validation function for validating if the field's value is a resolvable udp6 address. +func isUDP6AddrResolvable(fl FieldLevel) bool { + + if !isIP6Addr(fl) { + return false + } + + _, err := net.ResolveUDPAddr("udp6", fl.Field().String()) + + return err == nil +} + +// IsUDPAddrResolvable is the validation function for validating if the field's value is a resolvable udp address. +func isUDPAddrResolvable(fl FieldLevel) bool { + + if !isIP4Addr(fl) && !isIP6Addr(fl) { + return false + } + + _, err := net.ResolveUDPAddr("udp", fl.Field().String()) + + return err == nil +} + +// IsIP4AddrResolvable is the validation function for validating if the field's value is a resolvable ip4 address. +func isIP4AddrResolvable(fl FieldLevel) bool { + + if !isIPv4(fl) { + return false + } + + _, err := net.ResolveIPAddr("ip4", fl.Field().String()) + + return err == nil +} + +// IsIP6AddrResolvable is the validation function for validating if the field's value is a resolvable ip6 address. +func isIP6AddrResolvable(fl FieldLevel) bool { + + if !isIPv6(fl) { + return false + } + + _, err := net.ResolveIPAddr("ip6", fl.Field().String()) + + return err == nil +} + +// IsIPAddrResolvable is the validation function for validating if the field's value is a resolvable ip address. +func isIPAddrResolvable(fl FieldLevel) bool { + + if !isIP(fl) { + return false + } + + _, err := net.ResolveIPAddr("ip", fl.Field().String()) + + return err == nil +} + +// IsUnixAddrResolvable is the validation function for validating if the field's value is a resolvable unix address. 
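These resolvable-address validators do a syntactic IPv4/IPv6 pre-check and then defer to the net package; a hedged sketch using the tag names they are registered under, with literal addresses so no DNS lookup is involved:

    package main

    import (
        "fmt"

        "github.com/go-playground/validator/v10"
    )

    func main() {
        validate := validator.New()

        // Illustrative literals only.
        fmt.Println(validate.Var("127.0.0.1:8080", "tcp4_addr")) // passes
        fmt.Println(validate.Var("[::1]:9000", "tcp6_addr"))     // passes
        fmt.Println(validate.Var("127.0.0.1:53", "udp_addr"))    // passes
        fmt.Println(validate.Var("10.0.0.1", "ip4_addr"))        // passes
        fmt.Println(validate.Var("/tmp/app.sock", "unix_addr"))  // passes
        fmt.Println(validate.Var("not-an-ip:80", "tcp_addr"))    // fails the isIP4Addr/isIP6Addr pre-check
    }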
+func isUnixAddrResolvable(fl FieldLevel) bool { + + _, err := net.ResolveUnixAddr("unix", fl.Field().String()) + + return err == nil +} + +func isIP4Addr(fl FieldLevel) bool { + + val := fl.Field().String() + + if idx := strings.LastIndex(val, ":"); idx != -1 { + val = val[0:idx] + } + + ip := net.ParseIP(val) + + return ip != nil && ip.To4() != nil +} + +func isIP6Addr(fl FieldLevel) bool { + + val := fl.Field().String() + + if idx := strings.LastIndex(val, ":"); idx != -1 { + if idx != 0 && val[idx-1:idx] == "]" { + val = val[1 : idx-1] + } + } + + ip := net.ParseIP(val) + + return ip != nil && ip.To4() == nil +} + +func isHostnameRFC952(fl FieldLevel) bool { + return hostnameRegexRFC952.MatchString(fl.Field().String()) +} + +func isHostnameRFC1123(fl FieldLevel) bool { + return hostnameRegexRFC1123.MatchString(fl.Field().String()) +} + +func isFQDN(fl FieldLevel) bool { + val := fl.Field().String() + + if val == "" { + return false + } + + return fqdnRegexRFC1123.MatchString(val) +} + +// IsDir is the validation function for validating if the current field's value is a valid directory. +func isDir(fl FieldLevel) bool { + field := fl.Field() + + if field.Kind() == reflect.String { + fileInfo, err := os.Stat(field.String()) + if err != nil { + return false + } + + return fileInfo.IsDir() + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isJSON is the validation function for validating if the current field's value is a valid json string. +func isJSON(fl FieldLevel) bool { + field := fl.Field() + + if field.Kind() == reflect.String { + val := field.String() + return json.Valid([]byte(val)) + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isHostnamePort validates a host:port combination for fields typically used for socket address. +func isHostnamePort(fl FieldLevel) bool { + val := fl.Field().String() + host, port, err := net.SplitHostPort(val) + if err != nil { + return false + } + // Port must be an int <= 65535. + if portNum, err := strconv.ParseInt(port, 10, 32); err != nil || portNum > 65535 || portNum < 1 { + return false + } + + // If host is specified, it should match a DNS name + if host != "" { + return hostnameRegexRFC1123.MatchString(host) + } + return true +} + +// isLowercase is the validation function for validating if the current field's value is a lowercase string. +func isLowercase(fl FieldLevel) bool { + field := fl.Field() + + if field.Kind() == reflect.String { + if field.String() == "" { + return false + } + return field.String() == strings.ToLower(field.String()) + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isUppercase is the validation function for validating if the current field's value is an uppercase string. +func isUppercase(fl FieldLevel) bool { + field := fl.Field() + + if field.Kind() == reflect.String { + if field.String() == "" { + return false + } + return field.String() == strings.ToUpper(field.String()) + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isDatetime is the validation function for validating if the current field's value is a valid datetime string. +func isDatetime(fl FieldLevel) bool { + field := fl.Field() + param := fl.Param() + + if field.Kind() == reflect.String { + _, err := time.Parse(param, field.String()) + + return err == nil + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isTimeZone is the validation function for validating if the current field's value is a valid time zone string.
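And a hedged sketch for the string-shape validators in this block, plus isTimeZone which follows; the datetime parameter is a Go reference layout passed straight to time.Parse, and all literal values are invented:

    package main

    import (
        "fmt"

        "github.com/go-playground/validator/v10"
    )

    func main() {
        validate := validator.New()

        fmt.Println(validate.Var("example.com:8443", "hostname_port")) // passes
        fmt.Println(validate.Var("example.com:0", "hostname_port"))    // fails: port must be 1-65535
        fmt.Println(validate.Var(`{"ok":true}`, "json"))               // passes
        fmt.Println(validate.Var("2021-01-02", "datetime=2006-01-02")) // passes: parsed with time.Parse
        fmt.Println(validate.Var("MiXeD", "lowercase"))                // fails
        fmt.Println(validate.Var("Europe/Berlin", "timezone"))         // passes if the tz database knows the zone
    }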
+func isTimeZone(fl FieldLevel) bool { + field := fl.Field() + + if field.Kind() == reflect.String { + // empty value is converted to UTC by time.LoadLocation but disallow it as it is not a valid time zone name + if field.String() == "" { + return false + } + + // Local value is converted to the current system time zone by time.LoadLocation but disallow it as it is not a valid time zone name + if strings.ToLower(field.String()) == "local" { + return false + } + + _, err := time.LoadLocation(field.String()) + return err == nil + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isIso3166Alpha2 is the validation function for validating if the current field's value is a valid iso3166-1 alpha-2 country code. +func isIso3166Alpha2(fl FieldLevel) bool { + val := fl.Field().String() + return iso3166_1_alpha2[val] +} + +// isIso3166Alpha2 is the validation function for validating if the current field's value is a valid iso3166-1 alpha-3 country code. +func isIso3166Alpha3(fl FieldLevel) bool { + val := fl.Field().String() + return iso3166_1_alpha3[val] +} + +// isIso3166Alpha2 is the validation function for validating if the current field's value is a valid iso3166-1 alpha-numeric country code. +func isIso3166AlphaNumeric(fl FieldLevel) bool { + field := fl.Field() + + var code int + switch field.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + code = int(field.Int() % 1000) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + code = int(field.Uint() % 1000) + default: + panic(fmt.Sprintf("Bad field type %T", field.Interface())) + } + return iso3166_1_alpha_numeric[code] +} diff --git a/vendor/gopkg.in/go-playground/validator.v8/cache.go b/vendor/github.com/go-playground/validator/v10/cache.go similarity index 53% rename from vendor/gopkg.in/go-playground/validator.v8/cache.go rename to vendor/github.com/go-playground/validator/v10/cache.go index 76670c1dd..0d18d6ec4 100644 --- a/vendor/gopkg.in/go-playground/validator.v8/cache.go +++ b/vendor/github.com/go-playground/validator/v10/cache.go @@ -13,11 +13,19 @@ type tagType uint8 const ( typeDefault tagType = iota typeOmitEmpty + typeIsDefault typeNoStructLevel typeStructOnly typeDive typeOr - typeExists + typeKeys + typeEndKeys +) + +const ( + invalidValidation = "Invalid validation tag on field '%s'" + undefinedValidation = "Undefined validation function '%s' on field '%s'" + keysTagNotDefined = "'" + endKeysTag + "' tag encountered without a corresponding '" + keysTag + "' tag" ) type structCache struct { @@ -31,9 +39,7 @@ func (sc *structCache) Get(key reflect.Type) (c *cStruct, found bool) { } func (sc *structCache) Set(key reflect.Type, value *cStruct) { - m := sc.m.Load().(map[reflect.Type]*cStruct) - nm := make(map[reflect.Type]*cStruct, len(m)+1) for k, v := range m { nm[k] = v @@ -53,9 +59,7 @@ func (tc *tagCache) Get(key string) (c *cTag, found bool) { } func (tc *tagCache) Set(key string, value *cTag) { - m := tc.m.Load().(map[string]*cTag) - nm := make(map[string]*cTag, len(m)+1) for k, v := range m { nm[k] = v @@ -65,32 +69,36 @@ func (tc *tagCache) Set(key string, value *cTag) { } type cStruct struct { - Name string - fields map[int]*cField - fn StructLevelFunc + name string + fields []*cField + fn StructLevelFuncCtx } type cField struct { - Idx int - Name string - AltName string - cTags *cTag + idx int + name string + altName string + namesEqual bool + cTags *cTag } type cTag struct { - tag string - aliasTag string - actualAliasTag 
string - param string - hasAlias bool - typeof tagType - hasTag bool - fn Func - next *cTag + tag string + aliasTag string + actualAliasTag string + param string + keys *cTag // only populated when using tag's 'keys' and 'endkeys' for map key validation + next *cTag + fn FuncCtx + typeof tagType + hasTag bool + hasAlias bool + hasParam bool // true if parameter used eg. eq= where the equal sign has been set + isBlockEnd bool // indicates the current tag represents the last validation in the block + runValidationWhenNil bool } func (v *Validate) extractStructCache(current reflect.Value, sName string) *cStruct { - v.structCache.lock.Lock() defer v.structCache.lock.Unlock() // leave as defer! because if inner panics, it will never get unlocked otherwise! @@ -103,7 +111,7 @@ func (v *Validate) extractStructCache(current reflect.Value, sName string) *cStr return cs } - cs = &cStruct{Name: sName, fields: make(map[int]*cField), fn: v.structLevelFuncs[typ]} + cs = &cStruct{name: sName, fields: make([]*cField, 0), fn: v.structLevelFuncs[typ]} numFields := current.NumField() @@ -116,7 +124,7 @@ func (v *Validate) extractStructCache(current reflect.Value, sName string) *cStr fld = typ.Field(i) - if !fld.Anonymous && fld.PkgPath != blank { + if !fld.Anonymous && len(fld.PkgPath) > 0 { continue } @@ -128,12 +136,9 @@ func (v *Validate) extractStructCache(current reflect.Value, sName string) *cStr customName = fld.Name - if v.fieldNameTag != blank { - - name := strings.SplitN(fld.Tag.Get(v.fieldNameTag), ",", 2)[0] - - // dash check is for json "-" (aka skipValidationTag) means don't output in json - if name != "" && name != skipValidationTag { + if v.hasTagNameFunc { + name := v.tagNameFunc(fld) + if len(name) > 0 { customName = name } } @@ -142,66 +147,102 @@ func (v *Validate) extractStructCache(current reflect.Value, sName string) *cStr // and so only struct level caching can be used instead of combined with Field tag caching if len(tag) > 0 { - ctag, _ = v.parseFieldTagsRecursive(tag, fld.Name, blank, false) + ctag, _ = v.parseFieldTagsRecursive(tag, fld.Name, "", false) } else { // even if field doesn't have validations need cTag for traversing to potential inner/nested // elements of the field. 
ctag = new(cTag) } - cs.fields[i] = &cField{Idx: i, Name: fld.Name, AltName: customName, cTags: ctag} + cs.fields = append(cs.fields, &cField{ + idx: i, + name: fld.Name, + altName: customName, + cTags: ctag, + namesEqual: fld.Name == customName, + }) } - v.structCache.Set(typ, cs) - return cs } func (v *Validate) parseFieldTagsRecursive(tag string, fieldName string, alias string, hasAlias bool) (firstCtag *cTag, current *cTag) { - var t string - var ok bool noAlias := len(alias) == 0 tags := strings.Split(tag, tagSeparator) for i := 0; i < len(tags); i++ { - t = tags[i] - if noAlias { alias = t } - if v.hasAliasValidators { - // check map for alias and process new tags, otherwise process as usual - if tagsVal, found := v.aliasValidators[t]; found { - - if i == 0 { - firstCtag, current = v.parseFieldTagsRecursive(tagsVal, fieldName, t, true) - } else { - next, curr := v.parseFieldTagsRecursive(tagsVal, fieldName, t, true) - current.next, current = next, curr - - } + // check map for alias and process new tags, otherwise process as usual + if tagsVal, found := v.aliases[t]; found { + if i == 0 { + firstCtag, current = v.parseFieldTagsRecursive(tagsVal, fieldName, t, true) + } else { + next, curr := v.parseFieldTagsRecursive(tagsVal, fieldName, t, true) + current.next, current = next, curr - continue } + continue } + var prevTag tagType + if i == 0 { - current = &cTag{aliasTag: alias, hasAlias: hasAlias, hasTag: true} + current = &cTag{aliasTag: alias, hasAlias: hasAlias, hasTag: true, typeof: typeDefault} firstCtag = current } else { + prevTag = current.typeof current.next = &cTag{aliasTag: alias, hasAlias: hasAlias, hasTag: true} current = current.next } switch t { - case diveTag: current.typeof = typeDive continue + case keysTag: + current.typeof = typeKeys + + if i == 0 || prevTag != typeDive { + panic(fmt.Sprintf("'%s' tag must be immediately preceded by the '%s' tag", keysTag, diveTag)) + } + + current.typeof = typeKeys + + // need to pass along only keys tag + // need to increment i to skip over the keys tags + b := make([]byte, 0, 64) + + i++ + + for ; i < len(tags); i++ { + + b = append(b, tags[i]...) 
+ b = append(b, ',') + + if tags[i] == endKeysTag { + break + } + } + + current.keys, _ = v.parseFieldTagsRecursive(string(b[:len(b)-1]), fieldName, "", false) + continue + + case endKeysTag: + current.typeof = typeEndKeys + + // if there are more in tags then there was no keysTag defined + // and an error should be thrown + if i != len(tags)-1 { + panic(keysTagNotDefined) + } + return + case omitempty: current.typeof = typeOmitEmpty continue @@ -214,19 +255,15 @@ func (v *Validate) parseFieldTagsRecursive(tag string, fieldName string, alias s current.typeof = typeNoStructLevel continue - case existsTag: - current.typeof = typeExists - continue - default: - + if t == isdefault { + current.typeof = typeIsDefault + } // if a pipe character is needed within the param you must use the utf8Pipe representation "0x7C" orVals := strings.Split(t, orSeparator) for j := 0; j < len(orVals); j++ { - vals := strings.SplitN(orVals[j], tagKeySeparator, 2) - if noAlias { alias = vals[0] current.aliasTag = alias @@ -238,14 +275,18 @@ func (v *Validate) parseFieldTagsRecursive(tag string, fieldName string, alias s current.next = &cTag{aliasTag: alias, actualAliasTag: current.actualAliasTag, hasAlias: hasAlias, hasTag: true} current = current.next } + current.hasParam = len(vals) > 1 current.tag = vals[0] if len(current.tag) == 0 { panic(strings.TrimSpace(fmt.Sprintf(invalidValidation, fieldName))) } - if current.fn, ok = v.validationFuncs[current.tag]; !ok { - panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, fieldName))) + if wrapper, ok := v.validations[current.tag]; ok { + current.fn = wrapper.fn + current.runValidationWhenNil = wrapper.runValidatinOnNil + } else { + panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, current.tag, fieldName))) } if len(orVals) > 1 { @@ -256,8 +297,26 @@ func (v *Validate) parseFieldTagsRecursive(tag string, fieldName string, alias s current.param = strings.Replace(strings.Replace(vals[1], utf8HexComma, ",", -1), utf8Pipe, "|", -1) } } + current.isBlockEnd = true } } - return } + +func (v *Validate) fetchCacheTag(tag string) *cTag { + // find cached tag + ctag, found := v.tagCache.Get(tag) + if !found { + v.tagCache.lock.Lock() + defer v.tagCache.lock.Unlock() + + // could have been multiple trying to access, but once first is done this ensures tag + // isn't parsed again. 
+ ctag, found = v.tagCache.Get(tag) + if !found { + ctag, _ = v.parseFieldTagsRecursive(tag, "", "", false) + v.tagCache.Set(tag, ctag) + } + } + return ctag +} diff --git a/vendor/github.com/go-playground/validator/v10/country_codes.go b/vendor/github.com/go-playground/validator/v10/country_codes.go new file mode 100644 index 000000000..ef81eada8 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/country_codes.go @@ -0,0 +1,162 @@ +package validator + +var iso3166_1_alpha2 = map[string]bool{ + // see: https://www.iso.org/iso-3166-country-codes.html + "AF": true, "AX": true, "AL": true, "DZ": true, "AS": true, + "AD": true, "AO": true, "AI": true, "AQ": true, "AG": true, + "AR": true, "AM": true, "AW": true, "AU": true, "AT": true, + "AZ": true, "BS": true, "BH": true, "BD": true, "BB": true, + "BY": true, "BE": true, "BZ": true, "BJ": true, "BM": true, + "BT": true, "BO": true, "BQ": true, "BA": true, "BW": true, + "BV": true, "BR": true, "IO": true, "BN": true, "BG": true, + "BF": true, "BI": true, "KH": true, "CM": true, "CA": true, + "CV": true, "KY": true, "CF": true, "TD": true, "CL": true, + "CN": true, "CX": true, "CC": true, "CO": true, "KM": true, + "CG": true, "CD": true, "CK": true, "CR": true, "CI": true, + "HR": true, "CU": true, "CW": true, "CY": true, "CZ": true, + "DK": true, "DJ": true, "DM": true, "DO": true, "EC": true, + "EG": true, "SV": true, "GQ": true, "ER": true, "EE": true, + "ET": true, "FK": true, "FO": true, "FJ": true, "FI": true, + "FR": true, "GF": true, "PF": true, "TF": true, "GA": true, + "GM": true, "GE": true, "DE": true, "GH": true, "GI": true, + "GR": true, "GL": true, "GD": true, "GP": true, "GU": true, + "GT": true, "GG": true, "GN": true, "GW": true, "GY": true, + "HT": true, "HM": true, "VA": true, "HN": true, "HK": true, + "HU": true, "IS": true, "IN": true, "ID": true, "IR": true, + "IQ": true, "IE": true, "IM": true, "IL": true, "IT": true, + "JM": true, "JP": true, "JE": true, "JO": true, "KZ": true, + "KE": true, "KI": true, "KP": true, "KR": true, "KW": true, + "KG": true, "LA": true, "LV": true, "LB": true, "LS": true, + "LR": true, "LY": true, "LI": true, "LT": true, "LU": true, + "MO": true, "MK": true, "MG": true, "MW": true, "MY": true, + "MV": true, "ML": true, "MT": true, "MH": true, "MQ": true, + "MR": true, "MU": true, "YT": true, "MX": true, "FM": true, + "MD": true, "MC": true, "MN": true, "ME": true, "MS": true, + "MA": true, "MZ": true, "MM": true, "NA": true, "NR": true, + "NP": true, "NL": true, "NC": true, "NZ": true, "NI": true, + "NE": true, "NG": true, "NU": true, "NF": true, "MP": true, + "NO": true, "OM": true, "PK": true, "PW": true, "PS": true, + "PA": true, "PG": true, "PY": true, "PE": true, "PH": true, + "PN": true, "PL": true, "PT": true, "PR": true, "QA": true, + "RE": true, "RO": true, "RU": true, "RW": true, "BL": true, + "SH": true, "KN": true, "LC": true, "MF": true, "PM": true, + "VC": true, "WS": true, "SM": true, "ST": true, "SA": true, + "SN": true, "RS": true, "SC": true, "SL": true, "SG": true, + "SX": true, "SK": true, "SI": true, "SB": true, "SO": true, + "ZA": true, "GS": true, "SS": true, "ES": true, "LK": true, + "SD": true, "SR": true, "SJ": true, "SZ": true, "SE": true, + "CH": true, "SY": true, "TW": true, "TJ": true, "TZ": true, + "TH": true, "TL": true, "TG": true, "TK": true, "TO": true, + "TT": true, "TN": true, "TR": true, "TM": true, "TC": true, + "TV": true, "UG": true, "UA": true, "AE": true, "GB": true, + "US": true, "UM": true, "UY": true, "UZ": true, "VU": true, + "VE": 
true, "VN": true, "VG": true, "VI": true, "WF": true, + "EH": true, "YE": true, "ZM": true, "ZW": true, +} + +var iso3166_1_alpha3 = map[string]bool{ + // see: https://www.iso.org/iso-3166-country-codes.html + "AFG": true, "ALB": true, "DZA": true, "ASM": true, "AND": true, + "AGO": true, "AIA": true, "ATA": true, "ATG": true, "ARG": true, + "ARM": true, "ABW": true, "AUS": true, "AUT": true, "AZE": true, + "BHS": true, "BHR": true, "BGD": true, "BRB": true, "BLR": true, + "BEL": true, "BLZ": true, "BEN": true, "BMU": true, "BTN": true, + "BOL": true, "BES": true, "BIH": true, "BWA": true, "BVT": true, + "BRA": true, "IOT": true, "BRN": true, "BGR": true, "BFA": true, + "BDI": true, "CPV": true, "KHM": true, "CMR": true, "CAN": true, + "CYM": true, "CAF": true, "TCD": true, "CHL": true, "CHN": true, + "CXR": true, "CCK": true, "COL": true, "COM": true, "COD": true, + "COG": true, "COK": true, "CRI": true, "HRV": true, "CUB": true, + "CUW": true, "CYP": true, "CZE": true, "CIV": true, "DNK": true, + "DJI": true, "DMA": true, "DOM": true, "ECU": true, "EGY": true, + "SLV": true, "GNQ": true, "ERI": true, "EST": true, "SWZ": true, + "ETH": true, "FLK": true, "FRO": true, "FJI": true, "FIN": true, + "FRA": true, "GUF": true, "PYF": true, "ATF": true, "GAB": true, + "GMB": true, "GEO": true, "DEU": true, "GHA": true, "GIB": true, + "GRC": true, "GRL": true, "GRD": true, "GLP": true, "GUM": true, + "GTM": true, "GGY": true, "GIN": true, "GNB": true, "GUY": true, + "HTI": true, "HMD": true, "VAT": true, "HND": true, "HKG": true, + "HUN": true, "ISL": true, "IND": true, "IDN": true, "IRN": true, + "IRQ": true, "IRL": true, "IMN": true, "ISR": true, "ITA": true, + "JAM": true, "JPN": true, "JEY": true, "JOR": true, "KAZ": true, + "KEN": true, "KIR": true, "PRK": true, "KOR": true, "KWT": true, + "KGZ": true, "LAO": true, "LVA": true, "LBN": true, "LSO": true, + "LBR": true, "LBY": true, "LIE": true, "LTU": true, "LUX": true, + "MAC": true, "MDG": true, "MWI": true, "MYS": true, "MDV": true, + "MLI": true, "MLT": true, "MHL": true, "MTQ": true, "MRT": true, + "MUS": true, "MYT": true, "MEX": true, "FSM": true, "MDA": true, + "MCO": true, "MNG": true, "MNE": true, "MSR": true, "MAR": true, + "MOZ": true, "MMR": true, "NAM": true, "NRU": true, "NPL": true, + "NLD": true, "NCL": true, "NZL": true, "NIC": true, "NER": true, + "NGA": true, "NIU": true, "NFK": true, "MKD": true, "MNP": true, + "NOR": true, "OMN": true, "PAK": true, "PLW": true, "PSE": true, + "PAN": true, "PNG": true, "PRY": true, "PER": true, "PHL": true, + "PCN": true, "POL": true, "PRT": true, "PRI": true, "QAT": true, + "ROU": true, "RUS": true, "RWA": true, "REU": true, "BLM": true, + "SHN": true, "KNA": true, "LCA": true, "MAF": true, "SPM": true, + "VCT": true, "WSM": true, "SMR": true, "STP": true, "SAU": true, + "SEN": true, "SRB": true, "SYC": true, "SLE": true, "SGP": true, + "SXM": true, "SVK": true, "SVN": true, "SLB": true, "SOM": true, + "ZAF": true, "SGS": true, "SSD": true, "ESP": true, "LKA": true, + "SDN": true, "SUR": true, "SJM": true, "SWE": true, "CHE": true, + "SYR": true, "TWN": true, "TJK": true, "TZA": true, "THA": true, + "TLS": true, "TGO": true, "TKL": true, "TON": true, "TTO": true, + "TUN": true, "TUR": true, "TKM": true, "TCA": true, "TUV": true, + "UGA": true, "UKR": true, "ARE": true, "GBR": true, "UMI": true, + "USA": true, "URY": true, "UZB": true, "VUT": true, "VEN": true, + "VNM": true, "VGB": true, "VIR": true, "WLF": true, "ESH": true, + "YEM": true, "ZMB": true, "ZWE": true, "ALA": true, +} +var 
iso3166_1_alpha_numeric = map[int]bool{ + // see: https://www.iso.org/iso-3166-country-codes.html + 4: true, 8: true, 12: true, 16: true, 20: true, + 24: true, 660: true, 10: true, 28: true, 32: true, + 51: true, 533: true, 36: true, 40: true, 31: true, + 44: true, 48: true, 50: true, 52: true, 112: true, + 56: true, 84: true, 204: true, 60: true, 64: true, + 68: true, 535: true, 70: true, 72: true, 74: true, + 76: true, 86: true, 96: true, 100: true, 854: true, + 108: true, 132: true, 116: true, 120: true, 124: true, + 136: true, 140: true, 148: true, 152: true, 156: true, + 162: true, 166: true, 170: true, 174: true, 180: true, + 178: true, 184: true, 188: true, 191: true, 192: true, + 531: true, 196: true, 203: true, 384: true, 208: true, + 262: true, 212: true, 214: true, 218: true, 818: true, + 222: true, 226: true, 232: true, 233: true, 748: true, + 231: true, 238: true, 234: true, 242: true, 246: true, + 250: true, 254: true, 258: true, 260: true, 266: true, + 270: true, 268: true, 276: true, 288: true, 292: true, + 300: true, 304: true, 308: true, 312: true, 316: true, + 320: true, 831: true, 324: true, 624: true, 328: true, + 332: true, 334: true, 336: true, 340: true, 344: true, + 348: true, 352: true, 356: true, 360: true, 364: true, + 368: true, 372: true, 833: true, 376: true, 380: true, + 388: true, 392: true, 832: true, 400: true, 398: true, + 404: true, 296: true, 408: true, 410: true, 414: true, + 417: true, 418: true, 428: true, 422: true, 426: true, + 430: true, 434: true, 438: true, 440: true, 442: true, + 446: true, 450: true, 454: true, 458: true, 462: true, + 466: true, 470: true, 584: true, 474: true, 478: true, + 480: true, 175: true, 484: true, 583: true, 498: true, + 492: true, 496: true, 499: true, 500: true, 504: true, + 508: true, 104: true, 516: true, 520: true, 524: true, + 528: true, 540: true, 554: true, 558: true, 562: true, + 566: true, 570: true, 574: true, 807: true, 580: true, + 578: true, 512: true, 586: true, 585: true, 275: true, + 591: true, 598: true, 600: true, 604: true, 608: true, + 612: true, 616: true, 620: true, 630: true, 634: true, + 642: true, 643: true, 646: true, 638: true, 652: true, + 654: true, 659: true, 662: true, 663: true, 666: true, + 670: true, 882: true, 674: true, 678: true, 682: true, + 686: true, 688: true, 690: true, 694: true, 702: true, + 534: true, 703: true, 705: true, 90: true, 706: true, + 710: true, 239: true, 728: true, 724: true, 144: true, + 729: true, 740: true, 744: true, 752: true, 756: true, + 760: true, 158: true, 762: true, 834: true, 764: true, + 626: true, 768: true, 772: true, 776: true, 780: true, + 788: true, 792: true, 795: true, 796: true, 798: true, + 800: true, 804: true, 784: true, 826: true, 581: true, + 840: true, 858: true, 860: true, 548: true, 862: true, + 704: true, 92: true, 850: true, 876: true, 732: true, + 887: true, 894: true, 716: true, 248: true, +} diff --git a/vendor/gopkg.in/go-playground/validator.v8/doc.go b/vendor/github.com/go-playground/validator/v10/doc.go similarity index 50% rename from vendor/gopkg.in/go-playground/validator.v8/doc.go rename to vendor/github.com/go-playground/validator/v10/doc.go index fdede2c4c..a816c20a4 100644 --- a/vendor/gopkg.in/go-playground/validator.v8/doc.go +++ b/vendor/github.com/go-playground/validator/v10/doc.go @@ -5,21 +5,7 @@ based on tags. It can also handle Cross-Field and Cross-Struct validation for nested structs and has the ability to dive into arrays and maps of any type. -Why not a better error message? 
-Because this library intends for you to handle your own error messages. - -Why should I handle my own errors? -Many reasons. We built an internationalized application and needed to know the -field, and what validation failed so we could provide a localized error. - - if fieldErr.Field == "Name" { - switch fieldErr.ErrorTag - case "required": - return "Translated string based on field + error" - default: - return "Translated string based on field" - } - +see more examples https://github.com/go-playground/validator/tree/master/_examples Validation Functions Return Type error @@ -34,18 +20,20 @@ where err is always != nil: http://stackoverflow.com/a/29138676/3158232 https://github.com/go-playground/validator/issues/134 -Validator only returns nil or ValidationErrors as type error; so, in your code -all you need to do is check if the error returned is not nil, and if it's not -type cast it to type ValidationErrors like so err.(validator.ValidationErrors). +Validator only InvalidValidationError for bad validation input, nil or +ValidationErrors as type error; so, in your code all you need to do is check +if the error returned is not nil, and if it's not check if error is +InvalidValidationError ( if necessary, most of the time it isn't ) type cast +it to type ValidationErrors like so err.(validator.ValidationErrors). -Custom Functions +Custom Validation Functions -Custom functions can be added. Example: +Custom Validation functions can be added. Example: // Structure - func customFunc(v *Validate, topStruct reflect.Value, currentStructOrField reflect.Value, field reflect.Value, fieldType reflect.Type, fieldKind reflect.Kind, param string) bool { + func customFunc(fl validator.FieldLevel) bool { - if whatever { + if fl.Field().String() == "invalid" { return false } @@ -68,7 +56,7 @@ Cross-Field Validation can be done via the following tags: - eqcsfield - necsfield - gtcsfield - - ftecsfield + - gtecsfield - ltcsfield - ltecsfield @@ -106,7 +94,7 @@ used "eqcsfield" it could be multiple levels down. Example: // NOTE: when calling validate.Struct(val) topStruct will be the top level struct passed // into the function - // when calling validate.FieldWithValue(val, field, tag) val will be + // when calling validate.VarWithValue(val, field, tag) val will be // whatever you pass, struct, field... // when calling validate.Field(field, tag) val will be nil @@ -144,7 +132,7 @@ so the above will become excludesall=0x2C. Field `validate:"excludesall=0x2C"` // GOOD! Use the UTF-8 hex representation. } -Pipe ("|") is the default separator of validation tags. If you wish to +Pipe ("|") is the 'or' validation tags deparator. If you wish to have a pipe included within the parameter i.e. excludesall=| you will need to use the UTF-8 hex representation 0x7C, which is replaced in the code as a pipe, so the above will become excludesall=0x7C @@ -170,7 +158,7 @@ handy in ignoring embedded structs from being validated. (Usage: -) Or Operator This is the 'or' operator allowing multiple validators to be used and -accepted. (Usage: rbg|rgba) <-- this would allow either rgb or rgba +accepted. (Usage: rgb|rgba) <-- this would allow either rgb or rgba colors to be accepted. This can also be combined with 'and' for example ( Usage: omitempty,rgb|rgba) @@ -180,7 +168,7 @@ StructOnly When a field that is a nested struct is encountered, and contains this flag any validation on the nested struct will be run, but none of the nested -struct fields will be validated. 
This is usefull if inside of you program +struct fields will be validated. This is useful if inside of your program you know the struct will be valid, but need to verify it has been assigned. NOTE: only "required" and "omitempty" can be used on a struct itself. @@ -192,17 +180,6 @@ Same as structonly tag except that any struct level validations will not run. Usage: nostructlevel -Exists - -Is a special tag without a validation function attached. It is used when a field -is a Pointer, Interface or Invalid and you wish to validate that it exists. -Example: want to ensure a bool exists if you define the bool as a pointer and -use exists it will ensure there is a value; couldn't use required as it would -fail when the bool was false. exists will fail is the value is a Pointer, Interface -or Invalid and is nil. - - Usage: exists - Omit Empty Allows conditional validation, for example if a field is not set with @@ -216,7 +193,8 @@ Dive This tells the validator to dive into a slice, array or map and validate that level of the slice, array or map with the validation tags that follow. Multidimensional nesting is also supported, each level you wish to dive will -require another dive tag. +require another dive tag. dive has some sub-tags, 'keys' & 'endkeys', please see +the Keys & EndKeys section just below. Usage: dive @@ -234,6 +212,30 @@ Example #2 // []string will be spared validation // required will be applied to string +Keys & EndKeys + +These are to be used together directly after the dive tag and tells the validator +that anything between 'keys' and 'endkeys' applies to the keys of a map and not the +values; think of it like the 'dive' tag, but for map keys instead of values. +Multidimensional nesting is also supported, each level you wish to validate will +require another 'keys' and 'endkeys' tag. These tags are only valid for maps. + + Usage: dive,keys,othertagvalidation(s),endkeys,valuevalidationtags + +Example #1 + + map[string]string with validation tag "gt=0,dive,keys,eg=1|eq=2,endkeys,required" + // gt=0 will be applied to the map itself + // eg=1|eq=2 will be applied to the map keys + // required will be applied to map values + +Example #2 + + map[[2]string]string with validation tag "gt=0,dive,keys,dive,eq=1|eq=2,endkeys,required" + // gt=0 will be applied to the map itself + // eg=1|eq=2 will be applied to each array element in the the map keys + // required will be applied to map values + Required This validates that the value is not the data types default zero value. @@ -243,15 +245,127 @@ ensures the value is not nil. Usage: required +Required If + +The field under validation must be present and not empty only if all +the other specified fields are equal to the value following the specified +field. For strings ensures value is not "". For slices, maps, pointers, +interfaces, channels and functions ensures the value is not nil. + + Usage: required_if + +Examples: + + // require the field if the Field1 is equal to the parameter given: + Usage: required_if=Field1 foobar + + // require the field if the Field1 and Field2 is equal to the value respectively: + Usage: required_if=Field1 foo Field2 bar + +Required Unless + +The field under validation must be present and not empty unless all +the other specified fields are equal to the value following the specified +field. For strings ensures value is not "". For slices, maps, pointers, +interfaces, channels and functions ensures the value is not nil. 
+ + Usage: required_unless + +Examples: + + // require the field unless the Field1 is equal to the parameter given: + Usage: required_unless=Field1 foobar + + // require the field unless the Field1 and Field2 is equal to the value respectively: + Usage: required_unless=Field1 foo Field2 bar + +Required With + +The field under validation must be present and not empty only if any +of the other specified fields are present. For strings ensures value is +not "". For slices, maps, pointers, interfaces, channels and functions +ensures the value is not nil. + + Usage: required_with + +Examples: + + // require the field if the Field1 is present: + Usage: required_with=Field1 + + // require the field if the Field1 or Field2 is present: + Usage: required_with=Field1 Field2 + +Required With All + +The field under validation must be present and not empty only if all +of the other specified fields are present. For strings ensures value is +not "". For slices, maps, pointers, interfaces, channels and functions +ensures the value is not nil. + + Usage: required_with_all + +Example: + + // require the field if the Field1 and Field2 is present: + Usage: required_with_all=Field1 Field2 + +Required Without + +The field under validation must be present and not empty only when any +of the other specified fields are not present. For strings ensures value is +not "". For slices, maps, pointers, interfaces, channels and functions +ensures the value is not nil. + + Usage: required_without + +Examples: + + // require the field if the Field1 is not present: + Usage: required_without=Field1 + + // require the field if the Field1 or Field2 is not present: + Usage: required_without=Field1 Field2 + +Required Without All + +The field under validation must be present and not empty only when all +of the other specified fields are not present. For strings ensures value is +not "". For slices, maps, pointers, interfaces, channels and functions +ensures the value is not nil. + + Usage: required_without_all + +Example: + + // require the field if the Field1 and Field2 is not present: + Usage: required_without_all=Field1 Field2 + +Is Default + +This validates that the value is the default value and is almost the +opposite of required. + + Usage: isdefault + Length -For numbers, max will ensure that the value is +For numbers, length will ensure that the value is equal to the parameter given. For strings, it checks that the string length is exactly that number of characters. For slices, arrays, and maps, validates the number of items. +Example #1 + Usage: len=10 +Example #2 (time.Duration) + +For time.Duration, len will ensure that the value is equal to the duration given +in the parameter. + + Usage: len=1h30m + Maximum For numbers, max will ensure that the value is @@ -259,33 +373,81 @@ less than or equal to the parameter given. For strings, it checks that the string length is at most that number of characters. For slices, arrays, and maps, validates the number of items. +Example #1 + Usage: max=10 -Mininum +Example #2 (time.Duration) + +For time.Duration, max will ensure that the value is less than or equal to the +duration given in the parameter. + + Usage: max=1h30m + +Minimum For numbers, min will ensure that the value is greater or equal to the parameter given. For strings, it checks that the string length is at least that number of characters. For slices, arrays, and maps, validates the number of items. 
+Example #1 + Usage: min=10 +Example #2 (time.Duration) + +For time.Duration, min will ensure that the value is greater than or equal to +the duration given in the parameter. + + Usage: min=1h30m + Equals For strings & numbers, eq will ensure that the value is equal to the parameter given. For slices, arrays, and maps, validates the number of items. +Example #1 + Usage: eq=10 +Example #2 (time.Duration) + +For time.Duration, eq will ensure that the value is equal to the duration given +in the parameter. + + Usage: eq=1h30m + Not Equal For strings & numbers, ne will ensure that the value is not equal to the parameter given. For slices, arrays, and maps, validates the number of items. +Example #1 + Usage: ne=10 +Example #2 (time.Duration) + +For time.Duration, ne will ensure that the value is not equal to the duration +given in the parameter. + + Usage: ne=1h30m + +One Of + +For strings, ints, and uints, oneof will ensure that the value +is one of the values in the parameter. The parameter should be +a list of values separated by whitespace. Values may be +strings or numbers. To match strings with spaces in them, include +the target string between single quotes. + + Usage: oneof=red green + oneof='red green' 'blue yellow' + oneof=5 7 9 + Greater Than For numbers, this will ensure that the value is greater than the @@ -303,11 +465,17 @@ For time.Time ensures the time value is greater than time.Now.UTC(). Usage: gt +Example #3 (time.Duration) + +For time.Duration, gt will ensure that the value is greater than the duration +given in the parameter. + + Usage: gt=1h30m + Greater Than or Equal Same as 'min' above. Kept both to make terminology with 'len' easier. - Example #1 Usage: gte=10 @@ -318,6 +486,13 @@ For time.Time ensures the time value is greater than or equal to time.Now.UTC(). Usage: gte +Example #3 (time.Duration) + +For time.Duration, gte will ensure that the value is greater than or equal to +the duration given in the parameter. + + Usage: gte=1h30m + Less Than For numbers, this will ensure that the value is less than the parameter given. @@ -329,10 +504,18 @@ Example #1 Usage: lt=10 Example #2 (time.Time) + For time.Time ensures the time value is less than time.Now.UTC(). Usage: lt +Example #3 (time.Duration) + +For time.Duration, lt will ensure that the value is less than the duration given +in the parameter. + + Usage: lt=1h30m + Less Than or Equal Same as 'max' above. Kept both to make terminology with 'len' easier. @@ -347,6 +530,13 @@ For time.Time ensures the time value is less than or equal to time.Now.UTC(). Usage: lte +Example #3 (time.Duration) + +For time.Duration, lte will ensure that the value is less than or equal to the +duration given in the parameter. + + Usage: lte=1h30m + Field Equals Another Field This will validate the field value against another fields value either within @@ -360,7 +550,7 @@ Example #1: Example #2: // Validating by field: - validate.FieldWithValue(password, confirmpassword, "eqfield") + validate.VarWithValue(password, confirmpassword, "eqfield") Field Equals Another Field (relative) @@ -382,7 +572,7 @@ Examples: Usage: nefield=Color2 // Validating by field: - validate.FieldWithValue(color1, color2, "nefield") + validate.VarWithValue(color1, color2, "nefield") Field Does Not Equal Another Field (relative) @@ -393,9 +583,9 @@ relative to the top level struct. Field Greater Than Another Field -Only valid for Numbers and time.Time types, this will validate the field value -against another fields value either within a struct or passed in field. 
-usage examples are for validation of a Start and End date: +Only valid for Numbers, time.Duration and time.Time types, this will validate +the field value against another fields value either within a struct or passed in +field. usage examples are for validation of a Start and End date: Example #1: @@ -405,8 +595,7 @@ Example #1: Example #2: // Validating by field: - validate.FieldWithValue(start, end, "gtfield") - + validate.VarWithValue(start, end, "gtfield") Field Greater Than Another Relative Field @@ -417,9 +606,9 @@ relative to the top level struct. Field Greater Than or Equal To Another Field -Only valid for Numbers and time.Time types, this will validate the field value -against another fields value either within a struct or passed in field. -usage examples are for validation of a Start and End date: +Only valid for Numbers, time.Duration and time.Time types, this will validate +the field value against another fields value either within a struct or passed in +field. usage examples are for validation of a Start and End date: Example #1: @@ -429,7 +618,7 @@ Example #1: Example #2: // Validating by field: - validate.FieldWithValue(start, end, "gtefield") + validate.VarWithValue(start, end, "gtefield") Field Greater Than or Equal To Another Relative Field @@ -440,9 +629,9 @@ to the top level struct. Less Than Another Field -Only valid for Numbers and time.Time types, this will validate the field value -against another fields value either within a struct or passed in field. -usage examples are for validation of a Start and End date: +Only valid for Numbers, time.Duration and time.Time types, this will validate +the field value against another fields value either within a struct or passed in +field. usage examples are for validation of a Start and End date: Example #1: @@ -452,7 +641,7 @@ Example #1: Example #2: // Validating by field: - validate.FieldWithValue(start, end, "ltfield") + validate.VarWithValue(start, end, "ltfield") Less Than Another Relative Field @@ -463,9 +652,9 @@ to the top level struct. Less Than or Equal To Another Field -Only valid for Numbers and time.Time types, this will validate the field value -against another fields value either within a struct or passed in field. -usage examples are for validation of a Start and End date: +Only valid for Numbers, time.Duration and time.Time types, this will validate +the field value against another fields value either within a struct or passed in +field. usage examples are for validation of a Start and End date: Example #1: @@ -475,7 +664,7 @@ Example #1: Example #2: // Validating by field: - validate.FieldWithValue(start, end, "ltefield") + validate.VarWithValue(start, end, "ltefield") Less Than or Equal To Another Relative Field @@ -484,22 +673,71 @@ to the top level struct. Usage: ltecsfield=InnerStructField.Field +Field Contains Another Field + +This does the same as contains except for struct fields. It should only be used +with string types. See the behavior of reflect.Value.String() for behavior on +other types. + + Usage: containsfield=InnerStructField.Field + +Field Excludes Another Field + +This does the same as excludes except for struct fields. It should only be used +with string types. See the behavior of reflect.Value.String() for behavior on +other types. + + Usage: excludesfield=InnerStructField.Field + +Unique + +For arrays & slices, unique will ensure that there are no duplicates. +For maps, unique will ensure that there are no duplicate values. 
+For slices of struct, unique will ensure that there are no duplicate values +in a field of the struct specified via a parameter. + + // For arrays, slices, and maps: + Usage: unique + + // For slices of struct: + Usage: unique=field + Alpha Only -This validates that a string value contains alpha characters only +This validates that a string value contains ASCII alpha characters only Usage: alpha Alphanumeric -This validates that a string value contains alphanumeric characters only +This validates that a string value contains ASCII alphanumeric characters only Usage: alphanum +Alpha Unicode + +This validates that a string value contains unicode alpha characters only + + Usage: alphaunicode + +Alphanumeric Unicode + +This validates that a string value contains unicode alphanumeric characters only + + Usage: alphanumunicode + +Number + +This validates that a string value contains number values only. +For integers or float it returns true. + + Usage: number + Numeric This validates that a string value contains a basic numeric value. basic excludes exponents etc... +for integers or float it returns true. Usage: numeric @@ -516,6 +754,18 @@ hashtag (#) Usage: hexcolor +Lowercase String + +This validates that a string value contains only lowercase characters. An empty string is not a valid lowercase string. + + Usage: lowercase + +Uppercase String + +This validates that a string value contains only uppercase characters. An empty string is not a valid uppercase string. + + Usage: uppercase + RGB String This validates that a string value contains a valid rgb color @@ -540,14 +790,35 @@ This validates that a string value contains a valid hsla color Usage: hsla +E.164 Phone Number String + +This validates that a string value contains a valid E.164 Phone number +https://en.wikipedia.org/wiki/E.164 (ex. +1123456789) + + Usage: e164 + E-mail String This validates that a string value contains a valid email This may not conform to all possibilities of any rfc standard, but neither -does any email provider accept all posibilities. +does any email provider accept all possibilities. Usage: email +JSON String + +This validates that a string value is valid JSON + + Usage: json + +File path + +This validates that a string value contains a valid file path and that +the file exists on the machine. +This is done using os.Stat, which is a platform independent function. + + Usage: file + URL String This validates that a string value contains a valid url @@ -563,6 +834,13 @@ This will accept any uri the golang request uri accepts Usage: uri +Urn RFC 2141 String + +This validataes that a string value contains a valid URN +according to the RFC 2141 spec. + + Usage: urn_rfc2141 + Base64 String This validates that a string value contains a valid base64 value. @@ -572,6 +850,39 @@ this with the omitempty tag. Usage: base64 +Base64URL String + +This validates that a string value contains a valid base64 URL safe value +according the the RFC4648 spec. +Although an empty string is a valid base64 URL safe value, this will report +an empty string as an error, if you wish to accept an empty string as valid +you can use this with the omitempty tag. + + Usage: base64url + +Bitcoin Address + +This validates that a string value contains a valid bitcoin address. +The format of the string is checked to ensure it matches one of the three formats +P2PKH, P2SH and performs checksum validation. 
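A small sketch combining the unique tag with a few of the string validators
above (assuming the validator import; the types and data are illustrative):

    type User struct {
        Name  string `validate:"required,alphaunicode"`
        Email string `validate:"required,email"`
        Phone string `validate:"omitempty,e164"`
    }

    type Team struct {
        Labels  []string `validate:"unique"`       // no duplicate elements
        Members []User   `validate:"unique=Email"` // no two members share an Email
    }

    // fails: duplicate label and duplicate Email
    err := validator.New().Struct(Team{
        Labels:  []string{"a", "a"},
        Members: []User{{Name: "Ana", Email: "a@x.io"}, {Name: "Bo", Email: "a@x.io"}},
    })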
+ + Usage: btc_addr + +Bitcoin Bech32 Address (segwit) + +This validates that a string value contains a valid bitcoin Bech32 address as defined +by bip-0173 (https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki) +Special thanks to Pieter Wuille for providng reference implementations. + + Usage: btc_addr_bech32 + +Ethereum Address + +This validates that a string value contains a valid ethereum address. +The format of the string is checked to ensure it matches the standard Ethereum address format. + + Usage: eth_addr + Contains This validates that a string value contains the substring value. @@ -610,6 +921,30 @@ This validates that a string value does not contain the supplied rune value. Usage: excludesrune=@ +Starts With + +This validates that a string value starts with the supplied string value + + Usage: startswith=hello + +Ends With + +This validates that a string value ends with the supplied string value + + Usage: endswith=goodbye + +Does Not Start With + +This validates that a string value does not start with the supplied string value + + Usage: startsnotwith=hello + +Does Not End With + +This validates that a string value does not end with the supplied string value + + Usage: endsnotwith=goodbye + International Standard Book Number This validates that a string value contains a valid isbn10 or isbn13 value. @@ -628,28 +963,27 @@ This validates that a string value contains a valid isbn13 value. Usage: isbn13 - Universally Unique Identifier UUID -This validates that a string value contains a valid UUID. +This validates that a string value contains a valid UUID. Uppercase UUID values will not pass - use `uuid_rfc4122` instead. Usage: uuid Universally Unique Identifier UUID v3 -This validates that a string value contains a valid version 3 UUID. +This validates that a string value contains a valid version 3 UUID. Uppercase UUID values will not pass - use `uuid3_rfc4122` instead. Usage: uuid3 Universally Unique Identifier UUID v4 -This validates that a string value contains a valid version 4 UUID. +This validates that a string value contains a valid version 4 UUID. Uppercase UUID values will not pass - use `uuid4_rfc4122` instead. Usage: uuid4 Universally Unique Identifier UUID v5 -This validates that a string value contains a valid version 5 UUID. +This validates that a string value contains a valid version 5 UUID. Uppercase UUID values will not pass - use `uuid5_rfc4122` instead. Usage: uuid5 @@ -665,7 +999,7 @@ Printable ASCII This validates that a string value contains only printable ASCII characters. NOTE: if the string is blank, this validates as true. - Usage: asciiprint + Usage: printascii Multi-Byte Characters @@ -701,103 +1035,103 @@ This validates that a string value contains a valid U.S. Social Security Number. Internet Protocol Address IP -This validates that a string value contains a valid IP Adress. +This validates that a string value contains a valid IP Address. Usage: ip Internet Protocol Address IPv4 -This validates that a string value contains a valid v4 IP Adress. +This validates that a string value contains a valid v4 IP Address. Usage: ipv4 Internet Protocol Address IPv6 -This validates that a string value contains a valid v6 IP Adress. +This validates that a string value contains a valid v6 IP Address. Usage: ipv6 Classless Inter-Domain Routing CIDR -This validates that a string value contains a valid CIDR Adress. +This validates that a string value contains a valid CIDR Address. 
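For example, the substring, prefix/suffix and UUID tags above compose like this
(assuming the validator import; values are illustrative):

    type Resource struct {
        ID   string `validate:"required,uuid4"`
        Slug string `validate:"required,startswith=res-,endsnotwith=-"`
        Note string `validate:"omitempty,excludes=secret"`
    }

    err := validator.New().Struct(Resource{
        ID:   "8f14e45f-ceea-467f-a9ea-c03e1b9a1e2d",
        Slug: "res-demo",
    })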
Usage: cidr Classless Inter-Domain Routing CIDRv4 -This validates that a string value contains a valid v4 CIDR Adress. +This validates that a string value contains a valid v4 CIDR Address. Usage: cidrv4 Classless Inter-Domain Routing CIDRv6 -This validates that a string value contains a valid v6 CIDR Adress. +This validates that a string value contains a valid v6 CIDR Address. Usage: cidrv6 Transmission Control Protocol Address TCP -This validates that a string value contains a valid resolvable TCP Adress. +This validates that a string value contains a valid resolvable TCP Address. Usage: tcp_addr Transmission Control Protocol Address TCPv4 -This validates that a string value contains a valid resolvable v4 TCP Adress. +This validates that a string value contains a valid resolvable v4 TCP Address. Usage: tcp4_addr Transmission Control Protocol Address TCPv6 -This validates that a string value contains a valid resolvable v6 TCP Adress. +This validates that a string value contains a valid resolvable v6 TCP Address. Usage: tcp6_addr User Datagram Protocol Address UDP -This validates that a string value contains a valid resolvable UDP Adress. +This validates that a string value contains a valid resolvable UDP Address. Usage: udp_addr User Datagram Protocol Address UDPv4 -This validates that a string value contains a valid resolvable v4 UDP Adress. +This validates that a string value contains a valid resolvable v4 UDP Address. Usage: udp4_addr User Datagram Protocol Address UDPv6 -This validates that a string value contains a valid resolvable v6 UDP Adress. +This validates that a string value contains a valid resolvable v6 UDP Address. Usage: udp6_addr Internet Protocol Address IP -This validates that a string value contains a valid resolvable IP Adress. +This validates that a string value contains a valid resolvable IP Address. Usage: ip_addr Internet Protocol Address IPv4 -This validates that a string value contains a valid resolvable v4 IP Adress. +This validates that a string value contains a valid resolvable v4 IP Address. Usage: ip4_addr Internet Protocol Address IPv6 -This validates that a string value contains a valid resolvable v6 IP Adress. +This validates that a string value contains a valid resolvable v6 IP Address. Usage: ip6_addr Unix domain socket end point Address -This validates that a string value contains a valid Unix Adress. +This validates that a string value contains a valid Unix Address. Usage: unix_addr Media Access Control Address MAC -This validates that a string value contains a valid MAC Adress. +This validates that a string value contains a valid MAC Address. Usage: mac @@ -805,6 +1139,97 @@ Note: See Go's ParseMAC for accepted formats and types: http://golang.org/src/net/mac.go?s=866:918#L29 +Hostname RFC 952 + +This validates that a string value is a valid Hostname according to RFC 952 https://tools.ietf.org/html/rfc952 + + Usage: hostname + +Hostname RFC 1123 + +This validates that a string value is a valid Hostname according to RFC 1123 https://tools.ietf.org/html/rfc1123 + + Usage: hostname_rfc1123 or if you want to continue to use 'hostname' in your tags, create an alias. + +Full Qualified Domain Name (FQDN) + +This validates that a string value contains a valid FQDN. 
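The network address validators above follow the same pattern; a brief sketch
(assuming the validator import; addresses are illustrative):

    type Endpoint struct {
        IP      string `validate:"required,ip"`
        Network string `validate:"omitempty,cidrv4"`
        MAC     string `validate:"omitempty,mac"`
        Host    string `validate:"omitempty,hostname_rfc1123"`
    }

    err := validator.New().Struct(Endpoint{
        IP:      "192.168.0.10",
        Network: "10.0.0.0/8",
        MAC:     "00:1b:44:11:3a:b7",
    })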
+ + Usage: fqdn + +HTML Tags + +This validates that a string value appears to be an HTML element tag +including those described at https://developer.mozilla.org/en-US/docs/Web/HTML/Element + + Usage: html + +HTML Encoded + +This validates that a string value is a proper character reference in decimal +or hexadecimal format + + Usage: html_encoded + +URL Encoded + +This validates that a string value is percent-encoded (URL encoded) according +to https://tools.ietf.org/html/rfc3986#section-2.1 + + Usage: url_encoded + +Directory + +This validates that a string value contains a valid directory and that +it exists on the machine. +This is done using os.Stat, which is a platform independent function. + + Usage: dir + +HostPort + +This validates that a string value contains a valid DNS hostname and port that +can be used to valiate fields typically passed to sockets and connections. + + Usage: hostname_port + +Datetime + +This validates that a string value is a valid datetime based on the supplied datetime format. +Supplied format must match the official Go time format layout as documented in https://golang.org/pkg/time/ + + Usage: datetime=2006-01-02 + +Iso3166-1 alpha-2 + +This validates that a string value is a valid country code based on iso3166-1 alpha-2 standard. +see: https://www.iso.org/iso-3166-country-codes.html + + Usage: iso3166_1_alpha2 + +Iso3166-1 alpha-3 + +This validates that a string value is a valid country code based on iso3166-1 alpha-3 standard. +see: https://www.iso.org/iso-3166-country-codes.html + + Usage: iso3166_1_alpha3 + +Iso3166-1 alpha-numeric + +This validates that a string value is a valid country code based on iso3166-1 alpha-numeric standard. +see: https://www.iso.org/iso-3166-country-codes.html + + Usage: iso3166_1_alpha3 + +TimeZone + +This validates that a string value is a valid time zone based on the time zone database present on the system. +Although empty value and Local value are allowed by time.LoadLocation golang function, they are not allowed by this validator. +More information on https://golang.org/pkg/time/#LoadLocation + + Usage: timezone + + Alias Validators and Tags NOTE: When returning an error, the tag returned in "FieldError" will be @@ -816,6 +1241,8 @@ Here is a list of the current built in alias tags: "iscolor" alias is "hexcolor|rgb|rgba|hsl|hsla" (Usage: iscolor) + "country_code" + alias is "iso3166_1_alpha2|iso3166_1_alpha3|iso3166_1_alpha_numeric" (Usage: country_code) Validator notes: @@ -824,7 +1251,7 @@ Validator notes: of a regex which conflict with the validation definitions. Although workarounds can be made, they take away from using pure regex's. Furthermore it's quick and dirty but the regex's become harder to - maintain and are not reusable, so it's as much a programming philosiphy + maintain and are not reusable, so it's as much a programming philosophy as anything. In place of this new validator functions should be created; a regex can @@ -834,6 +1261,35 @@ Validator notes: And the best reason, you can submit a pull request and we can keep on adding to the validation library of this package! +Non standard validators + +A collection of validation rules that are frequently needed but are more +complex than the ones found in the baked in validators. +A non standard validator must be registered manually like you would +with your own custom validation functions. 
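A short sketch of the datetime, timezone and country-code checks described
above (assuming the validator import; the layout and values are illustrative):

    type Event struct {
        Day     string `validate:"required,datetime=2006-01-02"`
        TZ      string `validate:"required,timezone"`
        Country string `validate:"required,country_code"` // built in alias, see the alias list above
    }

    err := validator.New().Struct(Event{Day: "2020-07-01", TZ: "Europe/Berlin", Country: "DE"})

Registration and use of the non standard validators mentioned above is shown next.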
+ +Example of registration and use: + + type Test struct { + TestField string `validate:"yourtag"` + } + + t := &Test{ + TestField: "Test" + } + + validate := validator.New() + validate.RegisterValidation("yourtag", validators.NotBlank) + +Here is a list of the current non standard validators: + + NotBlank + This validates that the value is not blank or with length zero. + For strings ensures they do not contain only spaces. For channels, maps, slices and arrays + ensures they don't have zero length. For others, a non empty value is required. + + Usage: notblank + Panics This package panics when bad input is provided, this is by design, bad code like diff --git a/vendor/github.com/go-playground/validator/v10/errors.go b/vendor/github.com/go-playground/validator/v10/errors.go new file mode 100644 index 000000000..63293cf9e --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/errors.go @@ -0,0 +1,275 @@ +package validator + +import ( + "bytes" + "fmt" + "reflect" + "strings" + + ut "github.com/go-playground/universal-translator" +) + +const ( + fieldErrMsg = "Key: '%s' Error:Field validation for '%s' failed on the '%s' tag" +) + +// ValidationErrorsTranslations is the translation return type +type ValidationErrorsTranslations map[string]string + +// InvalidValidationError describes an invalid argument passed to +// `Struct`, `StructExcept`, StructPartial` or `Field` +type InvalidValidationError struct { + Type reflect.Type +} + +// Error returns InvalidValidationError message +func (e *InvalidValidationError) Error() string { + + if e.Type == nil { + return "validator: (nil)" + } + + return "validator: (nil " + e.Type.String() + ")" +} + +// ValidationErrors is an array of FieldError's +// for use in custom error messages post validation. +type ValidationErrors []FieldError + +// Error is intended for use in development + debugging and not intended to be a production error message. +// It allows ValidationErrors to subscribe to the Error interface. +// All information to create an error message specific to your application is contained within +// the FieldError found within the ValidationErrors array +func (ve ValidationErrors) Error() string { + + buff := bytes.NewBufferString("") + + var fe *fieldError + + for i := 0; i < len(ve); i++ { + + fe = ve[i].(*fieldError) + buff.WriteString(fe.Error()) + buff.WriteString("\n") + } + + return strings.TrimSpace(buff.String()) +} + +// Translate translates all of the ValidationErrors +func (ve ValidationErrors) Translate(ut ut.Translator) ValidationErrorsTranslations { + + trans := make(ValidationErrorsTranslations) + + var fe *fieldError + + for i := 0; i < len(ve); i++ { + fe = ve[i].(*fieldError) + + // // in case an Anonymous struct was used, ensure that the key + // // would be 'Username' instead of ".Username" + // if len(fe.ns) > 0 && fe.ns[:1] == "." { + // trans[fe.ns[1:]] = fe.Translate(ut) + // continue + // } + + trans[fe.ns] = fe.Translate(ut) + } + + return trans +} + +// FieldError contains all functions to get error details +type FieldError interface { + + // returns the validation tag that failed. if the + // validation was an alias, this will return the + // alias name and not the underlying tag that failed. + // + // eg. alias "iscolor": "hexcolor|rgb|rgba|hsl|hsla" + // will return "iscolor" + Tag() string + + // returns the validation tag that failed, even if an + // alias the actual tag within the alias will be returned. + // If an 'or' validation fails the entire or will be returned. + // + // eg. 
alias "iscolor": "hexcolor|rgb|rgba|hsl|hsla" + // will return "hexcolor|rgb|rgba|hsl|hsla" + ActualTag() string + + // returns the namespace for the field error, with the tag + // name taking precedence over the field's actual name. + // + // eg. JSON name "User.fname" + // + // See StructNamespace() for a version that returns actual names. + // + // NOTE: this field can be blank when validating a single primitive field + // using validate.Field(...) as there is no way to extract it's name + Namespace() string + + // returns the namespace for the field error, with the field's + // actual name. + // + // eq. "User.FirstName" see Namespace for comparison + // + // NOTE: this field can be blank when validating a single primitive field + // using validate.Field(...) as there is no way to extract its name + StructNamespace() string + + // returns the fields name with the tag name taking precedence over the + // field's actual name. + // + // eq. JSON name "fname" + // see StructField for comparison + Field() string + + // returns the field's actual name from the struct, when able to determine. + // + // eq. "FirstName" + // see Field for comparison + StructField() string + + // returns the actual field's value in case needed for creating the error + // message + Value() interface{} + + // returns the param value, in string form for comparison; this will also + // help with generating an error message + Param() string + + // Kind returns the Field's reflect Kind + // + // eg. time.Time's kind is a struct + Kind() reflect.Kind + + // Type returns the Field's reflect Type + // + // // eg. time.Time's type is time.Time + Type() reflect.Type + + // returns the FieldError's translated error + // from the provided 'ut.Translator' and registered 'TranslationFunc' + // + // NOTE: if no registered translator can be found it returns the same as + // calling fe.Error() + Translate(ut ut.Translator) string + + // Error returns the FieldError's message + Error() string +} + +// compile time interface checks +var _ FieldError = new(fieldError) +var _ error = new(fieldError) + +// fieldError contains a single field's validation error along +// with other properties that may be needed for error message creation +// it complies with the FieldError interface +type fieldError struct { + v *Validate + tag string + actualTag string + ns string + structNs string + fieldLen uint8 + structfieldLen uint8 + value interface{} + param string + kind reflect.Kind + typ reflect.Type +} + +// Tag returns the validation tag that failed. +func (fe *fieldError) Tag() string { + return fe.tag +} + +// ActualTag returns the validation tag that failed, even if an +// alias the actual tag within the alias will be returned. +func (fe *fieldError) ActualTag() string { + return fe.actualTag +} + +// Namespace returns the namespace for the field error, with the tag +// name taking precedence over the field's actual name. +func (fe *fieldError) Namespace() string { + return fe.ns +} + +// StructNamespace returns the namespace for the field error, with the field's +// actual name. +func (fe *fieldError) StructNamespace() string { + return fe.structNs +} + +// Field returns the field's name with the tag name taking precedence over the +// field's actual name. +func (fe *fieldError) Field() string { + + return fe.ns[len(fe.ns)-int(fe.fieldLen):] + // // return fe.field + // fld := fe.ns[len(fe.ns)-int(fe.fieldLen):] + + // log.Println("FLD:", fld) + + // if len(fld) > 0 && fld[:1] == "." 
{ + // return fld[1:] + // } + + // return fld +} + +// returns the field's actual name from the struct, when able to determine. +func (fe *fieldError) StructField() string { + // return fe.structField + return fe.structNs[len(fe.structNs)-int(fe.structfieldLen):] +} + +// Value returns the actual field's value in case needed for creating the error +// message +func (fe *fieldError) Value() interface{} { + return fe.value +} + +// Param returns the param value, in string form for comparison; this will +// also help with generating an error message +func (fe *fieldError) Param() string { + return fe.param +} + +// Kind returns the Field's reflect Kind +func (fe *fieldError) Kind() reflect.Kind { + return fe.kind +} + +// Type returns the Field's reflect Type +func (fe *fieldError) Type() reflect.Type { + return fe.typ +} + +// Error returns the fieldError's error message +func (fe *fieldError) Error() string { + return fmt.Sprintf(fieldErrMsg, fe.ns, fe.Field(), fe.tag) +} + +// Translate returns the FieldError's translated error +// from the provided 'ut.Translator' and registered 'TranslationFunc' +// +// NOTE: if no registered translation can be found, it returns the original +// untranslated error message. +func (fe *fieldError) Translate(ut ut.Translator) string { + + m, ok := fe.v.transTagFunc[ut] + if !ok { + return fe.Error() + } + + fn, ok := m[fe.tag] + if !ok { + return fe.Error() + } + + return fn(ut, fe) +} diff --git a/vendor/github.com/go-playground/validator/v10/field_level.go b/vendor/github.com/go-playground/validator/v10/field_level.go new file mode 100644 index 000000000..f0e2a9a85 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/field_level.go @@ -0,0 +1,119 @@ +package validator + +import "reflect" + +// FieldLevel contains all the information and helper functions +// to validate a field +type FieldLevel interface { + // returns the top level struct, if any + Top() reflect.Value + + // returns the current fields parent struct, if any or + // the comparison value if called 'VarWithValue' + Parent() reflect.Value + + // returns current field for validation + Field() reflect.Value + + // returns the field's name with the tag + // name taking precedence over the fields actual name. + FieldName() string + + // returns the struct field's name + StructFieldName() string + + // returns param for validation against current field + Param() string + + // GetTag returns the current validations tag name + GetTag() string + + // ExtractType gets the actual underlying type of field value. + // It will dive into pointers, customTypes and return you the + // underlying value and it's kind. + ExtractType(field reflect.Value) (value reflect.Value, kind reflect.Kind, nullable bool) + + // traverses the parent struct to retrieve a specific field denoted by the provided namespace + // in the param and returns the field, field kind and whether is was successful in retrieving + // the field at all. + // + // NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field + // could not be retrieved because it didn't exist. + // + // Deprecated: Use GetStructFieldOK2() instead which also return if the value is nullable. + GetStructFieldOK() (reflect.Value, reflect.Kind, bool) + + // GetStructFieldOKAdvanced is the same as GetStructFieldOK except that it accepts the parent struct to start looking for + // the field and namespace allowing more extensibility for validators. 
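// For illustration only, a sketch of a custom validation function built on the
// FieldLevel interface above and registered via RegisterValidation; the tag
// name and the rule are made up:
//
//    isEven := func(fl validator.FieldLevel) bool {
//        // fl.Param() would carry anything after '=', unused here
//        return fl.Field().Int()%2 == 0
//    }
//
//    v := validator.New()
//    _ = v.RegisterValidation("even", isEven)
//    err := v.Var(3, "even") // fails: 3 is odd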
+ // + // Deprecated: Use GetStructFieldOKAdvanced2() instead which also return if the value is nullable. + GetStructFieldOKAdvanced(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool) + + // traverses the parent struct to retrieve a specific field denoted by the provided namespace + // in the param and returns the field, field kind, if it's a nullable type and whether is was successful in retrieving + // the field at all. + // + // NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field + // could not be retrieved because it didn't exist. + GetStructFieldOK2() (reflect.Value, reflect.Kind, bool, bool) + + // GetStructFieldOKAdvanced is the same as GetStructFieldOK except that it accepts the parent struct to start looking for + // the field and namespace allowing more extensibility for validators. + GetStructFieldOKAdvanced2(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool, bool) +} + +var _ FieldLevel = new(validate) + +// Field returns current field for validation +func (v *validate) Field() reflect.Value { + return v.flField +} + +// FieldName returns the field's name with the tag +// name taking precedence over the fields actual name. +func (v *validate) FieldName() string { + return v.cf.altName +} + +// GetTag returns the current validations tag name +func (v *validate) GetTag() string { + return v.ct.tag +} + +// StructFieldName returns the struct field's name +func (v *validate) StructFieldName() string { + return v.cf.name +} + +// Param returns param for validation against current field +func (v *validate) Param() string { + return v.ct.param +} + +// GetStructFieldOK returns Param returns param for validation against current field +// +// Deprecated: Use GetStructFieldOK2() instead which also return if the value is nullable. +func (v *validate) GetStructFieldOK() (reflect.Value, reflect.Kind, bool) { + current, kind, _, found := v.getStructFieldOKInternal(v.slflParent, v.ct.param) + return current, kind, found +} + +// GetStructFieldOKAdvanced is the same as GetStructFieldOK except that it accepts the parent struct to start looking for +// the field and namespace allowing more extensibility for validators. +// +// Deprecated: Use GetStructFieldOKAdvanced2() instead which also return if the value is nullable. +func (v *validate) GetStructFieldOKAdvanced(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool) { + current, kind, _, found := v.GetStructFieldOKAdvanced2(val, namespace) + return current, kind, found +} + +// GetStructFieldOK returns Param returns param for validation against current field +func (v *validate) GetStructFieldOK2() (reflect.Value, reflect.Kind, bool, bool) { + return v.getStructFieldOKInternal(v.slflParent, v.ct.param) +} + +// GetStructFieldOKAdvanced is the same as GetStructFieldOK except that it accepts the parent struct to start looking for +// the field and namespace allowing more extensibility for validators. 
+func (v *validate) GetStructFieldOKAdvanced2(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool, bool) { + return v.getStructFieldOKInternal(val, namespace) +} diff --git a/vendor/github.com/go-playground/validator/v10/go.mod b/vendor/github.com/go-playground/validator/v10/go.mod new file mode 100644 index 000000000..d457100ea --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/go.mod @@ -0,0 +1,11 @@ +module github.com/go-playground/validator/v10 + +go 1.13 + +require ( + github.com/go-playground/assert/v2 v2.0.1 + github.com/go-playground/locales v0.13.0 + github.com/go-playground/universal-translator v0.17.0 + github.com/leodido/go-urn v1.2.0 + golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 +) diff --git a/vendor/github.com/go-playground/validator/v10/go.sum b/vendor/github.com/go-playground/validator/v10/go.sum new file mode 100644 index 000000000..015264273 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/go.sum @@ -0,0 +1,28 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 
h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/gopkg.in/go-playground/validator.v8/logo.png b/vendor/github.com/go-playground/validator/v10/logo.png similarity index 100% rename from vendor/gopkg.in/go-playground/validator.v8/logo.png rename to vendor/github.com/go-playground/validator/v10/logo.png diff --git a/vendor/github.com/go-playground/validator/v10/regexes.go b/vendor/github.com/go-playground/validator/v10/regexes.go new file mode 100644 index 000000000..b741f4e17 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/regexes.go @@ -0,0 +1,101 @@ +package validator + +import "regexp" + +const ( + alphaRegexString = "^[a-zA-Z]+$" + alphaNumericRegexString = "^[a-zA-Z0-9]+$" + alphaUnicodeRegexString = "^[\\p{L}]+$" + alphaUnicodeNumericRegexString = "^[\\p{L}\\p{N}]+$" + numericRegexString = "^[-+]?[0-9]+(?:\\.[0-9]+)?$" + numberRegexString = "^[0-9]+$" + hexadecimalRegexString = "^(0[xX])?[0-9a-fA-F]+$" + hexcolorRegexString = "^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" + rgbRegexString = "^rgb\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*\\)$" + rgbaRegexString = "^rgba\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$" + hslRegexString = "^hsl\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*\\)$" + hslaRegexString = "^hsla\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$" + emailRegexString = "^(?:(?:(?:(?:[a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(?:\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|(?:(?:\\x22)(?:(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(?:\\x20|\\x09)+)?(?:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(\\x20|\\x09)+)?(?:\\x22))))@(?:(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$" + e164RegexString = "^\\+[1-9]?[0-9]{7,14}$" + base64RegexString = 
"^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" + base64URLRegexString = "^(?:[A-Za-z0-9-_]{4})*(?:[A-Za-z0-9-_]{2}==|[A-Za-z0-9-_]{3}=|[A-Za-z0-9-_]{4})$" + iSBN10RegexString = "^(?:[0-9]{9}X|[0-9]{10})$" + iSBN13RegexString = "^(?:(?:97(?:8|9))[0-9]{10})$" + uUID3RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$" + uUID4RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + uUID5RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + uUIDRegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" + uUID3RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-3[0-9a-fA-F]{3}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$" + uUID4RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-4[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$" + uUID5RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-5[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$" + uUIDRFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$" + aSCIIRegexString = "^[\x00-\x7F]*$" + printableASCIIRegexString = "^[\x20-\x7E]*$" + multibyteRegexString = "[^\x00-\x7F]" + dataURIRegexString = `^data:((?:\w+\/(?:([^;]|;[^;]).)+)?)` + latitudeRegexString = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$" + longitudeRegexString = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$" + sSNRegexString = `^[0-9]{3}[ -]?(0[1-9]|[1-9][0-9])[ -]?([1-9][0-9]{3}|[0-9][1-9][0-9]{2}|[0-9]{2}[1-9][0-9]|[0-9]{3}[1-9])$` + hostnameRegexStringRFC952 = `^[a-zA-Z]([a-zA-Z0-9\-]+[\.]?)*[a-zA-Z0-9]$` // https://tools.ietf.org/html/rfc952 + hostnameRegexStringRFC1123 = `^([a-zA-Z0-9]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*?$` // accepts hostname starting with a digit https://tools.ietf.org/html/rfc1123 + fqdnRegexStringRFC1123 = `^([a-zA-Z0-9]{1}[a-zA-Z0-9_-]{0,62})(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*?(\.[a-zA-Z]{1}[a-zA-Z0-9]{0,62})\.?$` // same as hostnameRegexStringRFC1123 but must contain a non numerical TLD (possibly ending with '.') + btcAddressRegexString = `^[13][a-km-zA-HJ-NP-Z1-9]{25,34}$` // bitcoin address + btcAddressUpperRegexStringBech32 = `^BC1[02-9AC-HJ-NP-Z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32 + btcAddressLowerRegexStringBech32 = `^bc1[02-9ac-hj-np-z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32 + ethAddressRegexString = `^0x[0-9a-fA-F]{40}$` + ethAddressUpperRegexString = `^0x[0-9A-F]{40}$` + ethAddressLowerRegexString = `^0x[0-9a-f]{40}$` + uRLEncodedRegexString = `(%[A-Fa-f0-9]{2})` + hTMLEncodedRegexString = `&#[x]?([0-9a-fA-F]{2})|(>)|(<)|(")|(&)+[;]?` + hTMLRegexString = `<[/]?([a-zA-Z]+).*?>` + splitParamsRegexString = `'[^']*'|\S+` +) + +var ( + alphaRegex = regexp.MustCompile(alphaRegexString) + alphaNumericRegex = regexp.MustCompile(alphaNumericRegexString) + alphaUnicodeRegex = regexp.MustCompile(alphaUnicodeRegexString) + alphaUnicodeNumericRegex = regexp.MustCompile(alphaUnicodeNumericRegexString) + numericRegex = regexp.MustCompile(numericRegexString) + numberRegex = regexp.MustCompile(numberRegexString) + hexadecimalRegex = regexp.MustCompile(hexadecimalRegexString) + hexcolorRegex = regexp.MustCompile(hexcolorRegexString) + rgbRegex = regexp.MustCompile(rgbRegexString) + rgbaRegex = regexp.MustCompile(rgbaRegexString) + hslRegex = regexp.MustCompile(hslRegexString) + hslaRegex = regexp.MustCompile(hslaRegexString) + e164Regex = 
regexp.MustCompile(e164RegexString) + emailRegex = regexp.MustCompile(emailRegexString) + base64Regex = regexp.MustCompile(base64RegexString) + base64URLRegex = regexp.MustCompile(base64URLRegexString) + iSBN10Regex = regexp.MustCompile(iSBN10RegexString) + iSBN13Regex = regexp.MustCompile(iSBN13RegexString) + uUID3Regex = regexp.MustCompile(uUID3RegexString) + uUID4Regex = regexp.MustCompile(uUID4RegexString) + uUID5Regex = regexp.MustCompile(uUID5RegexString) + uUIDRegex = regexp.MustCompile(uUIDRegexString) + uUID3RFC4122Regex = regexp.MustCompile(uUID3RFC4122RegexString) + uUID4RFC4122Regex = regexp.MustCompile(uUID4RFC4122RegexString) + uUID5RFC4122Regex = regexp.MustCompile(uUID5RFC4122RegexString) + uUIDRFC4122Regex = regexp.MustCompile(uUIDRFC4122RegexString) + aSCIIRegex = regexp.MustCompile(aSCIIRegexString) + printableASCIIRegex = regexp.MustCompile(printableASCIIRegexString) + multibyteRegex = regexp.MustCompile(multibyteRegexString) + dataURIRegex = regexp.MustCompile(dataURIRegexString) + latitudeRegex = regexp.MustCompile(latitudeRegexString) + longitudeRegex = regexp.MustCompile(longitudeRegexString) + sSNRegex = regexp.MustCompile(sSNRegexString) + hostnameRegexRFC952 = regexp.MustCompile(hostnameRegexStringRFC952) + hostnameRegexRFC1123 = regexp.MustCompile(hostnameRegexStringRFC1123) + fqdnRegexRFC1123 = regexp.MustCompile(fqdnRegexStringRFC1123) + btcAddressRegex = regexp.MustCompile(btcAddressRegexString) + btcUpperAddressRegexBech32 = regexp.MustCompile(btcAddressUpperRegexStringBech32) + btcLowerAddressRegexBech32 = regexp.MustCompile(btcAddressLowerRegexStringBech32) + ethAddressRegex = regexp.MustCompile(ethAddressRegexString) + ethaddressRegexUpper = regexp.MustCompile(ethAddressUpperRegexString) + ethAddressRegexLower = regexp.MustCompile(ethAddressLowerRegexString) + uRLEncodedRegex = regexp.MustCompile(uRLEncodedRegexString) + hTMLEncodedRegex = regexp.MustCompile(hTMLEncodedRegexString) + hTMLRegex = regexp.MustCompile(hTMLRegexString) + splitParamsRegex = regexp.MustCompile(splitParamsRegexString) +) diff --git a/vendor/github.com/go-playground/validator/v10/struct_level.go b/vendor/github.com/go-playground/validator/v10/struct_level.go new file mode 100644 index 000000000..57691ee38 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/struct_level.go @@ -0,0 +1,175 @@ +package validator + +import ( + "context" + "reflect" +) + +// StructLevelFunc accepts all values needed for struct level validation +type StructLevelFunc func(sl StructLevel) + +// StructLevelFuncCtx accepts all values needed for struct level validation +// but also allows passing of contextual validation information via context.Context. +type StructLevelFuncCtx func(ctx context.Context, sl StructLevel) + +// wrapStructLevelFunc wraps normal StructLevelFunc makes it compatible with StructLevelFuncCtx +func wrapStructLevelFunc(fn StructLevelFunc) StructLevelFuncCtx { + return func(ctx context.Context, sl StructLevel) { + fn(sl) + } +} + +// StructLevel contains all the information and helper functions +// to validate a struct +type StructLevel interface { + + // returns the main validation object, in case one wants to call validations internally. + // this is so you don't have to use anonymous functions to get access to the validate + // instance. + Validator() *Validate + + // returns the top level struct, if any + Top() reflect.Value + + // returns the current fields parent struct, if any + Parent() reflect.Value + + // returns the current struct. 
+ Current() reflect.Value + + // ExtractType gets the actual underlying type of field value. + // It will dive into pointers, customTypes and return you the + // underlying value and its kind. + ExtractType(field reflect.Value) (value reflect.Value, kind reflect.Kind, nullable bool) + + // reports an error just by passing the field and tag information + // + // NOTES: + // + // fieldName and altName get appended to the existing namespace that + // validator is on. e.g. pass 'FirstName' or 'Names[0]' depending + // on the nesting + // + // tag can be an existing validation tag or just something you make up + // and process on the flip side it's up to you. + ReportError(field interface{}, fieldName, structFieldName string, tag, param string) + + // reports an error just by passing ValidationErrors + // + // NOTES: + // + // relativeNamespace and relativeActualNamespace get appended to the + // existing namespace that validator is on. + // e.g. pass 'User.FirstName' or 'Users[0].FirstName' depending + // on the nesting. most of the time they will be blank, unless you validate + // at a level lower the the current field depth + ReportValidationErrors(relativeNamespace, relativeActualNamespace string, errs ValidationErrors) +} + +var _ StructLevel = new(validate) + +// Top returns the top level struct +// +// NOTE: this can be the same as the current struct being validated +// if not is a nested struct. +// +// this is only called when within Struct and Field Level validation and +// should not be relied upon for an acurate value otherwise. +func (v *validate) Top() reflect.Value { + return v.top +} + +// Parent returns the current structs parent +// +// NOTE: this can be the same as the current struct being validated +// if not is a nested struct. +// +// this is only called when within Struct and Field Level validation and +// should not be relied upon for an acurate value otherwise. +func (v *validate) Parent() reflect.Value { + return v.slflParent +} + +// Current returns the current struct. +func (v *validate) Current() reflect.Value { + return v.slCurrent +} + +// Validator returns the main validation object, in case one want to call validations internally. +func (v *validate) Validator() *Validate { + return v.v +} + +// ExtractType gets the actual underlying type of field value. 
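// A sketch of how callers typically hook into this interface through
// RegisterStructValidation; the User type and the rule are illustrative:
//
//    type User struct {
//        FirstName string
//        LastName  string
//    }
//
//    v := validator.New()
//    v.RegisterStructValidation(func(sl validator.StructLevel) {
//        u := sl.Current().Interface().(User)
//        if len(u.FirstName) == 0 && len(u.LastName) == 0 {
//            // field, fieldName, structFieldName, tag, param
//            sl.ReportError(u.FirstName, "FirstName", "FirstName", "fnameorlname", "")
//        }
//    }, User{})
//    err := v.Struct(User{}) // reports the fnameorlname error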
+func (v *validate) ExtractType(field reflect.Value) (reflect.Value, reflect.Kind, bool) { + return v.extractTypeInternal(field, false) +} + +// ReportError reports an error just by passing the field and tag information +func (v *validate) ReportError(field interface{}, fieldName, structFieldName, tag, param string) { + + fv, kind, _ := v.extractTypeInternal(reflect.ValueOf(field), false) + + if len(structFieldName) == 0 { + structFieldName = fieldName + } + + v.str1 = string(append(v.ns, fieldName...)) + + if v.v.hasTagNameFunc || fieldName != structFieldName { + v.str2 = string(append(v.actualNs, structFieldName...)) + } else { + v.str2 = v.str1 + } + + if kind == reflect.Invalid { + + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: tag, + actualTag: tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(fieldName)), + structfieldLen: uint8(len(structFieldName)), + param: param, + kind: kind, + }, + ) + return + } + + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: tag, + actualTag: tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(fieldName)), + structfieldLen: uint8(len(structFieldName)), + value: fv.Interface(), + param: param, + kind: kind, + typ: fv.Type(), + }, + ) +} + +// ReportValidationErrors reports ValidationErrors obtained from running validations within the Struct Level validation. +// +// NOTE: this function prepends the current namespace to the relative ones. +func (v *validate) ReportValidationErrors(relativeNamespace, relativeStructNamespace string, errs ValidationErrors) { + + var err *fieldError + + for i := 0; i < len(errs); i++ { + + err = errs[i].(*fieldError) + err.ns = string(append(append(v.ns, relativeNamespace...), err.ns...)) + err.structNs = string(append(append(v.actualNs, relativeStructNamespace...), err.structNs...)) + + v.errs = append(v.errs, err) + } +} diff --git a/vendor/github.com/go-playground/validator/v10/translations.go b/vendor/github.com/go-playground/validator/v10/translations.go new file mode 100644 index 000000000..4d9d75c13 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/translations.go @@ -0,0 +1,11 @@ +package validator + +import ut "github.com/go-playground/universal-translator" + +// TranslationFunc is the function type used to register or override +// custom translations +type TranslationFunc func(ut ut.Translator, fe FieldError) string + +// RegisterTranslationsFunc allows for registering of translations +// for a 'ut.Translator' for use within the 'TranslationFunc' +type RegisterTranslationsFunc func(ut ut.Translator) error diff --git a/vendor/gopkg.in/go-playground/validator.v8/util.go b/vendor/github.com/go-playground/validator/v10/util.go similarity index 52% rename from vendor/gopkg.in/go-playground/validator.v8/util.go rename to vendor/github.com/go-playground/validator/v10/util.go index e81157909..56420f430 100644 --- a/vendor/gopkg.in/go-playground/validator.v8/util.go +++ b/vendor/github.com/go-playground/validator/v10/util.go @@ -4,44 +4,15 @@ import ( "reflect" "strconv" "strings" + "time" ) -const ( - blank = "" - namespaceSeparator = "." 
- leftBracket = "[" - rightBracket = "]" - restrictedTagChars = ".[],|=+()`~!@#$%^&*\\\"/?<>{}" - restrictedAliasErr = "Alias '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation" - restrictedTagErr = "Tag '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation" -) - -var ( - restrictedTags = map[string]struct{}{ - diveTag: {}, - existsTag: {}, - structOnlyTag: {}, - omitempty: {}, - skipValidationTag: {}, - utf8HexComma: {}, - utf8Pipe: {}, - noStructLevelTag: {}, - } -) - -// ExtractType gets the actual underlying type of field value. +// extractTypeInternal gets the actual underlying type of field value. // It will dive into pointers, customTypes and return you the // underlying value and it's kind. -// it is exposed for use within you Custom Functions -func (v *Validate) ExtractType(current reflect.Value) (reflect.Value, reflect.Kind) { - - val, k, _ := v.extractTypeInternal(current, false) - return val, k -} - -// only exists to not break backward compatibility, needed to return the third param for a bug fix internally -func (v *Validate) extractTypeInternal(current reflect.Value, nullable bool) (reflect.Value, reflect.Kind, bool) { +func (v *validate) extractTypeInternal(current reflect.Value, nullable bool) (reflect.Value, reflect.Kind, bool) { +BEGIN: switch current.Kind() { case reflect.Ptr: @@ -51,7 +22,8 @@ func (v *Validate) extractTypeInternal(current reflect.Value, nullable bool) (re return current, reflect.Ptr, nullable } - return v.extractTypeInternal(current.Elem(), nullable) + current = current.Elem() + goto BEGIN case reflect.Interface: @@ -61,17 +33,19 @@ func (v *Validate) extractTypeInternal(current reflect.Value, nullable bool) (re return current, reflect.Interface, nullable } - return v.extractTypeInternal(current.Elem(), nullable) + current = current.Elem() + goto BEGIN case reflect.Invalid: return current, reflect.Invalid, nullable default: - if v.hasCustomFuncs { + if v.v.hasCustomFuncs { - if fn, ok := v.customTypeFuncs[current.Type()]; ok { - return v.extractTypeInternal(reflect.ValueOf(fn(current)), nullable) + if fn, ok := v.v.customFuncs[current.Type()]; ok { + current = reflect.ValueOf(fn(current)) + goto BEGIN } } @@ -79,35 +53,36 @@ func (v *Validate) extractTypeInternal(current reflect.Value, nullable bool) (re } } -// GetStructFieldOK traverses a struct to retrieve a specific field denoted by the provided namespace and +// getStructFieldOKInternal traverses a struct to retrieve a specific field denoted by the provided namespace and // returns the field, field kind and whether is was successful in retrieving the field at all. +// // NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field // could not be retrieved because it didn't exist. 
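// The namespace walking below is what the cross-struct *csfield tags rely on;
// for illustration (types and field names made up), a relative lookup such as
// "Inner.Start" resolves through nested structs:
//
//    type Inner struct{ Start time.Time }
//    type Outer struct {
//        Inner Inner
//        End   time.Time `validate:"gtcsfield=Inner.Start"`
//    }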
-func (v *Validate) GetStructFieldOK(current reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool) { - - current, kind := v.ExtractType(current) +func (v *validate) getStructFieldOKInternal(val reflect.Value, namespace string) (current reflect.Value, kind reflect.Kind, nullable bool, found bool) { +BEGIN: + current, kind, nullable = v.ExtractType(val) if kind == reflect.Invalid { - return current, kind, false + return } - if namespace == blank { - return current, kind, true + if namespace == "" { + found = true + return } switch kind { case reflect.Ptr, reflect.Interface: - - return current, kind, false + return case reflect.Struct: typ := current.Type() fld := namespace - ns := namespace + var ns string - if typ != timeType && typ != timePtrType { + if typ != timeType { idx := strings.Index(namespace, namespaceSeparator) @@ -115,7 +90,7 @@ func (v *Validate) GetStructFieldOK(current reflect.Value, namespace string) (re fld = namespace[:idx] ns = namespace[idx+1:] } else { - ns = blank + ns = "" } bracketIdx := strings.Index(fld, leftBracket) @@ -125,9 +100,9 @@ func (v *Validate) GetStructFieldOK(current reflect.Value, namespace string) (re ns = namespace[bracketIdx:] } - current = current.FieldByName(fld) - - return v.GetStructFieldOK(current, ns) + val = current.FieldByName(fld) + namespace = ns + goto BEGIN } case reflect.Array, reflect.Slice: @@ -137,7 +112,7 @@ func (v *Validate) GetStructFieldOK(current reflect.Value, namespace string) (re arrIdx, _ := strconv.Atoi(namespace[idx+1 : idx2]) if arrIdx >= current.Len() { - return current, kind, false + return } startIdx := idx2 + 1 @@ -148,7 +123,9 @@ func (v *Validate) GetStructFieldOK(current reflect.Value, namespace string) (re } } - return v.GetStructFieldOK(current.Index(arrIdx), namespace[startIdx:]) + val = current.Index(arrIdx) + namespace = namespace[startIdx:] + goto BEGIN case reflect.Map: idx := strings.Index(namespace, leftBracket) + 1 @@ -167,48 +144,76 @@ func (v *Validate) GetStructFieldOK(current reflect.Value, namespace string) (re switch current.Type().Key().Kind() { case reflect.Int: i, _ := strconv.Atoi(key) - return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(i)), namespace[endIdx+1:]) + val = current.MapIndex(reflect.ValueOf(i)) + namespace = namespace[endIdx+1:] + case reflect.Int8: i, _ := strconv.ParseInt(key, 10, 8) - return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(int8(i))), namespace[endIdx+1:]) + val = current.MapIndex(reflect.ValueOf(int8(i))) + namespace = namespace[endIdx+1:] + case reflect.Int16: i, _ := strconv.ParseInt(key, 10, 16) - return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(int16(i))), namespace[endIdx+1:]) + val = current.MapIndex(reflect.ValueOf(int16(i))) + namespace = namespace[endIdx+1:] + case reflect.Int32: i, _ := strconv.ParseInt(key, 10, 32) - return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(int32(i))), namespace[endIdx+1:]) + val = current.MapIndex(reflect.ValueOf(int32(i))) + namespace = namespace[endIdx+1:] + case reflect.Int64: i, _ := strconv.ParseInt(key, 10, 64) - return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(i)), namespace[endIdx+1:]) + val = current.MapIndex(reflect.ValueOf(i)) + namespace = namespace[endIdx+1:] + case reflect.Uint: i, _ := strconv.ParseUint(key, 10, 0) - return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(uint(i))), namespace[endIdx+1:]) + val = current.MapIndex(reflect.ValueOf(uint(i))) + namespace = namespace[endIdx+1:] + case reflect.Uint8: i, _ := strconv.ParseUint(key, 10, 8) - 
return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(uint8(i))), namespace[endIdx+1:]) + val = current.MapIndex(reflect.ValueOf(uint8(i))) + namespace = namespace[endIdx+1:] + case reflect.Uint16: i, _ := strconv.ParseUint(key, 10, 16) - return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(uint16(i))), namespace[endIdx+1:]) + val = current.MapIndex(reflect.ValueOf(uint16(i))) + namespace = namespace[endIdx+1:] + case reflect.Uint32: i, _ := strconv.ParseUint(key, 10, 32) - return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(uint32(i))), namespace[endIdx+1:]) + val = current.MapIndex(reflect.ValueOf(uint32(i))) + namespace = namespace[endIdx+1:] + case reflect.Uint64: i, _ := strconv.ParseUint(key, 10, 64) - return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(i)), namespace[endIdx+1:]) + val = current.MapIndex(reflect.ValueOf(i)) + namespace = namespace[endIdx+1:] + case reflect.Float32: f, _ := strconv.ParseFloat(key, 32) - return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(float32(f))), namespace[endIdx+1:]) + val = current.MapIndex(reflect.ValueOf(float32(f))) + namespace = namespace[endIdx+1:] + case reflect.Float64: f, _ := strconv.ParseFloat(key, 64) - return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(f)), namespace[endIdx+1:]) + val = current.MapIndex(reflect.ValueOf(f)) + namespace = namespace[endIdx+1:] + case reflect.Bool: b, _ := strconv.ParseBool(key) - return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(b)), namespace[endIdx+1:]) + val = current.MapIndex(reflect.ValueOf(b)) + namespace = namespace[endIdx+1:] // reflect.Type = string default: - return v.GetStructFieldOK(current.MapIndex(reflect.ValueOf(key)), namespace[endIdx+1:]) + val = current.MapIndex(reflect.ValueOf(key)) + namespace = namespace[endIdx+1:] } + + goto BEGIN } // if got here there was more namespace, cannot go any deeper @@ -218,13 +223,34 @@ func (v *Validate) GetStructFieldOK(current reflect.Value, namespace string) (re // asInt returns the parameter as a int64 // or panics if it can't convert func asInt(param string) int64 { - i, err := strconv.ParseInt(param, 0, 64) panicIf(err) return i } +// asIntFromTimeDuration parses param as time.Duration and returns it as int64 +// or panics on error. +func asIntFromTimeDuration(param string) int64 { + d, err := time.ParseDuration(param) + if err != nil { + // attempt parsing as an an integer assuming nanosecond precision + return asInt(param) + } + return int64(d) +} + +// asIntFromType calls the proper function to parse param as int64, +// given a field's Type t. 
+func asIntFromType(t reflect.Type, param string) int64 { + switch t { + case timeDurationType: + return asIntFromTimeDuration(param) + default: + return asInt(param) + } +} + // asUint returns the parameter as a uint64 // or panics if it can't convert func asUint(param string) uint64 { @@ -245,6 +271,16 @@ func asFloat(param string) float64 { return i } +// asBool returns the parameter as a bool +// or panics if it can't convert +func asBool(param string) bool { + + i, err := strconv.ParseBool(param) + panicIf(err) + + return i +} + func panicIf(err error) { if err != nil { panic(err.Error()) diff --git a/vendor/github.com/go-playground/validator/v10/validator.go b/vendor/github.com/go-playground/validator/v10/validator.go new file mode 100644 index 000000000..f097f3942 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/validator.go @@ -0,0 +1,477 @@ +package validator + +import ( + "context" + "fmt" + "reflect" + "strconv" +) + +// per validate construct +type validate struct { + v *Validate + top reflect.Value + ns []byte + actualNs []byte + errs ValidationErrors + includeExclude map[string]struct{} // reset only if StructPartial or StructExcept are called, no need otherwise + ffn FilterFunc + slflParent reflect.Value // StructLevel & FieldLevel + slCurrent reflect.Value // StructLevel & FieldLevel + flField reflect.Value // StructLevel & FieldLevel + cf *cField // StructLevel & FieldLevel + ct *cTag // StructLevel & FieldLevel + misc []byte // misc reusable + str1 string // misc reusable + str2 string // misc reusable + fldIsPointer bool // StructLevel & FieldLevel + isPartial bool + hasExcludes bool +} + +// parent and current will be the same the first run of validateStruct +func (v *validate) validateStruct(ctx context.Context, parent reflect.Value, current reflect.Value, typ reflect.Type, ns []byte, structNs []byte, ct *cTag) { + + cs, ok := v.v.structCache.Get(typ) + if !ok { + cs = v.v.extractStructCache(current, typ.Name()) + } + + if len(ns) == 0 && len(cs.name) != 0 { + + ns = append(ns, cs.name...) + ns = append(ns, '.') + + structNs = append(structNs, cs.name...) + structNs = append(structNs, '.') + } + + // ct is nil on top level struct, and structs as fields that have no tag info + // so if nil or if not nil and the structonly tag isn't present + if ct == nil || ct.typeof != typeStructOnly { + + var f *cField + + for i := 0; i < len(cs.fields); i++ { + + f = cs.fields[i] + + if v.isPartial { + + if v.ffn != nil { + // used with StructFiltered + if v.ffn(append(structNs, f.name...)) { + continue + } + + } else { + // used with StructPartial & StructExcept + _, ok = v.includeExclude[string(append(structNs, f.name...))] + + if (ok && v.hasExcludes) || (!ok && !v.hasExcludes) { + continue + } + } + } + + v.traverseField(ctx, parent, current.Field(f.idx), ns, structNs, f, f.cTags) + } + } + + // check if any struct level validations, after all field validations already checked. + // first iteration will have no info about nostructlevel tag, and is checked prior to + // calling the next iteration of validateStruct called from traverseField. 
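// The isPartial / includeExclude / ffn handling above corresponds to the public
// StructPartial, StructExcept and StructFiltered entry points; a hypothetical
// call site (the user value and field names are made up):
//
//    v := validator.New()
//    _ = v.StructPartial(user, "Name")    // validate only Name's tags
//    _ = v.StructExcept(user, "Password") // validate everything except Password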
+ if cs.fn != nil { + + v.slflParent = parent + v.slCurrent = current + v.ns = ns + v.actualNs = structNs + + cs.fn(ctx, v) + } +} + +// traverseField validates any field, be it a struct or single field, ensures it's validity and passes it along to be validated via it's tag options +func (v *validate) traverseField(ctx context.Context, parent reflect.Value, current reflect.Value, ns []byte, structNs []byte, cf *cField, ct *cTag) { + var typ reflect.Type + var kind reflect.Kind + + current, kind, v.fldIsPointer = v.extractTypeInternal(current, false) + + switch kind { + case reflect.Ptr, reflect.Interface, reflect.Invalid: + + if ct == nil { + return + } + + if ct.typeof == typeOmitEmpty || ct.typeof == typeIsDefault { + return + } + + if ct.hasTag { + if kind == reflect.Invalid { + v.str1 = string(append(ns, cf.altName...)) + if v.v.hasTagNameFunc { + v.str2 = string(append(structNs, cf.name...)) + } else { + v.str2 = v.str1 + } + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: ct.aliasTag, + actualTag: ct.tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + param: ct.param, + kind: kind, + }, + ) + return + } + + v.str1 = string(append(ns, cf.altName...)) + if v.v.hasTagNameFunc { + v.str2 = string(append(structNs, cf.name...)) + } else { + v.str2 = v.str1 + } + if !ct.runValidationWhenNil { + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: ct.aliasTag, + actualTag: ct.tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + value: current.Interface(), + param: ct.param, + kind: kind, + typ: current.Type(), + }, + ) + return + } + } + + case reflect.Struct: + + typ = current.Type() + + if typ != timeType { + + if ct != nil { + + if ct.typeof == typeStructOnly { + goto CONTINUE + } else if ct.typeof == typeIsDefault { + // set Field Level fields + v.slflParent = parent + v.flField = current + v.cf = cf + v.ct = ct + + if !ct.fn(ctx, v) { + v.str1 = string(append(ns, cf.altName...)) + + if v.v.hasTagNameFunc { + v.str2 = string(append(structNs, cf.name...)) + } else { + v.str2 = v.str1 + } + + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: ct.aliasTag, + actualTag: ct.tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + value: current.Interface(), + param: ct.param, + kind: kind, + typ: typ, + }, + ) + return + } + } + + ct = ct.next + } + + if ct != nil && ct.typeof == typeNoStructLevel { + return + } + + CONTINUE: + // if len == 0 then validating using 'Var' or 'VarWithValue' + // Var - doesn't make much sense to do it that way, should call 'Struct', but no harm... 
+ // VarWithField - this allows for validating against each field within the struct against a specific value + // pretty handy in certain situations + if len(cf.name) > 0 { + ns = append(append(ns, cf.altName...), '.') + structNs = append(append(structNs, cf.name...), '.') + } + + v.validateStruct(ctx, current, current, typ, ns, structNs, ct) + return + } + } + + if !ct.hasTag { + return + } + + typ = current.Type() + +OUTER: + for { + if ct == nil { + return + } + + switch ct.typeof { + + case typeOmitEmpty: + + // set Field Level fields + v.slflParent = parent + v.flField = current + v.cf = cf + v.ct = ct + + if !hasValue(v) { + return + } + + ct = ct.next + continue + + case typeEndKeys: + return + + case typeDive: + + ct = ct.next + + // traverse slice or map here + // or panic ;) + switch kind { + case reflect.Slice, reflect.Array: + + var i64 int64 + reusableCF := &cField{} + + for i := 0; i < current.Len(); i++ { + + i64 = int64(i) + + v.misc = append(v.misc[0:0], cf.name...) + v.misc = append(v.misc, '[') + v.misc = strconv.AppendInt(v.misc, i64, 10) + v.misc = append(v.misc, ']') + + reusableCF.name = string(v.misc) + + if cf.namesEqual { + reusableCF.altName = reusableCF.name + } else { + + v.misc = append(v.misc[0:0], cf.altName...) + v.misc = append(v.misc, '[') + v.misc = strconv.AppendInt(v.misc, i64, 10) + v.misc = append(v.misc, ']') + + reusableCF.altName = string(v.misc) + } + v.traverseField(ctx, parent, current.Index(i), ns, structNs, reusableCF, ct) + } + + case reflect.Map: + + var pv string + reusableCF := &cField{} + + for _, key := range current.MapKeys() { + + pv = fmt.Sprintf("%v", key.Interface()) + + v.misc = append(v.misc[0:0], cf.name...) + v.misc = append(v.misc, '[') + v.misc = append(v.misc, pv...) + v.misc = append(v.misc, ']') + + reusableCF.name = string(v.misc) + + if cf.namesEqual { + reusableCF.altName = reusableCF.name + } else { + v.misc = append(v.misc[0:0], cf.altName...) + v.misc = append(v.misc, '[') + v.misc = append(v.misc, pv...) + v.misc = append(v.misc, ']') + + reusableCF.altName = string(v.misc) + } + + if ct != nil && ct.typeof == typeKeys && ct.keys != nil { + v.traverseField(ctx, parent, key, ns, structNs, reusableCF, ct.keys) + // can be nil when just keys being validated + if ct.next != nil { + v.traverseField(ctx, parent, current.MapIndex(key), ns, structNs, reusableCF, ct.next) + } + } else { + v.traverseField(ctx, parent, current.MapIndex(key), ns, structNs, reusableCF, ct) + } + } + + default: + // throw error, if not a slice or map then should not have gotten here + // bad dive tag + panic("dive error! can't dive on a non slice or map") + } + + return + + case typeOr: + + v.misc = v.misc[0:0] + + for { + + // set Field Level fields + v.slflParent = parent + v.flField = current + v.cf = cf + v.ct = ct + + if ct.fn(ctx, v) { + + // drain rest of the 'or' values, then continue or leave + for { + + ct = ct.next + + if ct == nil { + return + } + + if ct.typeof != typeOr { + continue OUTER + } + } + } + + v.misc = append(v.misc, '|') + v.misc = append(v.misc, ct.tag...) + + if ct.hasParam { + v.misc = append(v.misc, '=') + v.misc = append(v.misc, ct.param...) 
+ } + + if ct.isBlockEnd || ct.next == nil { + // if we get here, no valid 'or' value and no more tags + v.str1 = string(append(ns, cf.altName...)) + + if v.v.hasTagNameFunc { + v.str2 = string(append(structNs, cf.name...)) + } else { + v.str2 = v.str1 + } + + if ct.hasAlias { + + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: ct.aliasTag, + actualTag: ct.actualAliasTag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + value: current.Interface(), + param: ct.param, + kind: kind, + typ: typ, + }, + ) + + } else { + + tVal := string(v.misc)[1:] + + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: tVal, + actualTag: tVal, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + value: current.Interface(), + param: ct.param, + kind: kind, + typ: typ, + }, + ) + } + + return + } + + ct = ct.next + } + + default: + + // set Field Level fields + v.slflParent = parent + v.flField = current + v.cf = cf + v.ct = ct + + if !ct.fn(ctx, v) { + + v.str1 = string(append(ns, cf.altName...)) + + if v.v.hasTagNameFunc { + v.str2 = string(append(structNs, cf.name...)) + } else { + v.str2 = v.str1 + } + + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: ct.aliasTag, + actualTag: ct.tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + value: current.Interface(), + param: ct.param, + kind: kind, + typ: typ, + }, + ) + + return + } + ct = ct.next + } + } + +} diff --git a/vendor/github.com/go-playground/validator/v10/validator_instance.go b/vendor/github.com/go-playground/validator/v10/validator_instance.go new file mode 100644 index 000000000..fe6a48775 --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/validator_instance.go @@ -0,0 +1,619 @@ +package validator + +import ( + "context" + "errors" + "fmt" + "reflect" + "strings" + "sync" + "time" + + ut "github.com/go-playground/universal-translator" +) + +const ( + defaultTagName = "validate" + utf8HexComma = "0x2C" + utf8Pipe = "0x7C" + tagSeparator = "," + orSeparator = "|" + tagKeySeparator = "=" + structOnlyTag = "structonly" + noStructLevelTag = "nostructlevel" + omitempty = "omitempty" + isdefault = "isdefault" + requiredWithoutAllTag = "required_without_all" + requiredWithoutTag = "required_without" + requiredWithTag = "required_with" + requiredWithAllTag = "required_with_all" + requiredIfTag = "required_if" + requiredUnlessTag = "required_unless" + skipValidationTag = "-" + diveTag = "dive" + keysTag = "keys" + endKeysTag = "endkeys" + requiredTag = "required" + namespaceSeparator = "." + leftBracket = "[" + rightBracket = "]" + restrictedTagChars = ".[],|=+()`~!@#$%^&*\\\"/?<>{}" + restrictedAliasErr = "Alias '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation" + restrictedTagErr = "Tag '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation" +) + +var ( + timeDurationType = reflect.TypeOf(time.Duration(0)) + timeType = reflect.TypeOf(time.Time{}) + + defaultCField = &cField{namesEqual: true} +) + +// FilterFunc is the type used to filter fields using +// StructFiltered(...) function. 
+// returning true results in the field being filtered/skiped from +// validation +type FilterFunc func(ns []byte) bool + +// CustomTypeFunc allows for overriding or adding custom field type handler functions +// field = field value of the type to return a value to be validated +// example Valuer from sql drive see https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29 +type CustomTypeFunc func(field reflect.Value) interface{} + +// TagNameFunc allows for adding of a custom tag name parser +type TagNameFunc func(field reflect.StructField) string + +type internalValidationFuncWrapper struct { + fn FuncCtx + runValidatinOnNil bool +} + +// Validate contains the validator settings and cache +type Validate struct { + tagName string + pool *sync.Pool + hasCustomFuncs bool + hasTagNameFunc bool + tagNameFunc TagNameFunc + structLevelFuncs map[reflect.Type]StructLevelFuncCtx + customFuncs map[reflect.Type]CustomTypeFunc + aliases map[string]string + validations map[string]internalValidationFuncWrapper + transTagFunc map[ut.Translator]map[string]TranslationFunc // map[]map[]TranslationFunc + tagCache *tagCache + structCache *structCache +} + +// New returns a new instance of 'validate' with sane defaults. +func New() *Validate { + + tc := new(tagCache) + tc.m.Store(make(map[string]*cTag)) + + sc := new(structCache) + sc.m.Store(make(map[reflect.Type]*cStruct)) + + v := &Validate{ + tagName: defaultTagName, + aliases: make(map[string]string, len(bakedInAliases)), + validations: make(map[string]internalValidationFuncWrapper, len(bakedInValidators)), + tagCache: tc, + structCache: sc, + } + + // must copy alias validators for separate validations to be used in each validator instance + for k, val := range bakedInAliases { + v.RegisterAlias(k, val) + } + + // must copy validators for separate validations to be used in each instance + for k, val := range bakedInValidators { + + switch k { + // these require that even if the value is nil that the validation should run, omitempty still overrides this behaviour + case requiredIfTag, requiredUnlessTag, requiredWithTag, requiredWithAllTag, requiredWithoutTag, requiredWithoutAllTag: + _ = v.registerValidation(k, wrapFunc(val), true, true) + default: + // no need to error check here, baked in will always be valid + _ = v.registerValidation(k, wrapFunc(val), true, false) + } + } + + v.pool = &sync.Pool{ + New: func() interface{} { + return &validate{ + v: v, + ns: make([]byte, 0, 64), + actualNs: make([]byte, 0, 64), + misc: make([]byte, 32), + } + }, + } + + return v +} + +// SetTagName allows for changing of the default tag name of 'validate' +func (v *Validate) SetTagName(name string) { + v.tagName = name +} + +// RegisterTagNameFunc registers a function to get alternate names for StructFields. +// +// eg. to use the names which have been specified for JSON representations of structs, rather than normal Go field names: +// +// validate.RegisterTagNameFunc(func(fld reflect.StructField) string { +// name := strings.SplitN(fld.Tag.Get("json"), ",", 2)[0] +// if name == "-" { +// return "" +// } +// return name +// }) +func (v *Validate) RegisterTagNameFunc(fn TagNameFunc) { + v.tagNameFunc = fn + v.hasTagNameFunc = true +} + +// RegisterValidation adds a validation with the given tag +// +// NOTES: +// - if the key already exists, the previous validation function will be replaced. 
+// - this method is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterValidation(tag string, fn Func, callValidationEvenIfNull ...bool) error { + return v.RegisterValidationCtx(tag, wrapFunc(fn), callValidationEvenIfNull...) +} + +// RegisterValidationCtx does the same as RegisterValidation on accepts a FuncCtx validation +// allowing context.Context validation support. +func (v *Validate) RegisterValidationCtx(tag string, fn FuncCtx, callValidationEvenIfNull ...bool) error { + var nilCheckable bool + if len(callValidationEvenIfNull) > 0 { + nilCheckable = callValidationEvenIfNull[0] + } + return v.registerValidation(tag, fn, false, nilCheckable) +} + +func (v *Validate) registerValidation(tag string, fn FuncCtx, bakedIn bool, nilCheckable bool) error { + if len(tag) == 0 { + return errors.New("Function Key cannot be empty") + } + + if fn == nil { + return errors.New("Function cannot be empty") + } + + _, ok := restrictedTags[tag] + if !bakedIn && (ok || strings.ContainsAny(tag, restrictedTagChars)) { + panic(fmt.Sprintf(restrictedTagErr, tag)) + } + v.validations[tag] = internalValidationFuncWrapper{fn: fn, runValidatinOnNil: nilCheckable} + return nil +} + +// RegisterAlias registers a mapping of a single validation tag that +// defines a common or complex set of validation(s) to simplify adding validation +// to structs. +// +// NOTE: this function is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterAlias(alias, tags string) { + + _, ok := restrictedTags[alias] + + if ok || strings.ContainsAny(alias, restrictedTagChars) { + panic(fmt.Sprintf(restrictedAliasErr, alias)) + } + + v.aliases[alias] = tags +} + +// RegisterStructValidation registers a StructLevelFunc against a number of types. +// +// NOTE: +// - this method is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterStructValidation(fn StructLevelFunc, types ...interface{}) { + v.RegisterStructValidationCtx(wrapStructLevelFunc(fn), types...) +} + +// RegisterStructValidationCtx registers a StructLevelFuncCtx against a number of types and allows passing +// of contextual validation information via context.Context. +// +// NOTE: +// - this method is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterStructValidationCtx(fn StructLevelFuncCtx, types ...interface{}) { + + if v.structLevelFuncs == nil { + v.structLevelFuncs = make(map[reflect.Type]StructLevelFuncCtx) + } + + for _, t := range types { + tv := reflect.ValueOf(t) + if tv.Kind() == reflect.Ptr { + t = reflect.Indirect(tv).Interface() + } + + v.structLevelFuncs[reflect.TypeOf(t)] = fn + } +} + +// RegisterCustomTypeFunc registers a CustomTypeFunc against a number of types +// +// NOTE: this method is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterCustomTypeFunc(fn CustomTypeFunc, types ...interface{}) { + + if v.customFuncs == nil { + v.customFuncs = make(map[reflect.Type]CustomTypeFunc) + } + + for _, t := range types { + v.customFuncs[reflect.TypeOf(t)] = fn + } + + v.hasCustomFuncs = true +} + +// RegisterTranslation registers translations against the provided tag. 
+func (v *Validate) RegisterTranslation(tag string, trans ut.Translator, registerFn RegisterTranslationsFunc, translationFn TranslationFunc) (err error) { + + if v.transTagFunc == nil { + v.transTagFunc = make(map[ut.Translator]map[string]TranslationFunc) + } + + if err = registerFn(trans); err != nil { + return + } + + m, ok := v.transTagFunc[trans] + if !ok { + m = make(map[string]TranslationFunc) + v.transTagFunc[trans] = m + } + + m[tag] = translationFn + + return +} + +// Struct validates a struct's exposed fields, and automatically validates nested structs, unless otherwise specified. +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +func (v *Validate) Struct(s interface{}) error { + return v.StructCtx(context.Background(), s) +} + +// StructCtx validates a struct's exposed fields, and automatically validates nested structs, unless otherwise specified +// and also allows passing of context.Context for contextual validation information. +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +func (v *Validate) StructCtx(ctx context.Context, s interface{}) (err error) { + + val := reflect.ValueOf(s) + top := val + + if val.Kind() == reflect.Ptr && !val.IsNil() { + val = val.Elem() + } + + if val.Kind() != reflect.Struct || val.Type() == timeType { + return &InvalidValidationError{Type: reflect.TypeOf(s)} + } + + // good to validate + vd := v.pool.Get().(*validate) + vd.top = top + vd.isPartial = false + // vd.hasExcludes = false // only need to reset in StructPartial and StructExcept + + vd.validateStruct(ctx, top, val, val.Type(), vd.ns[0:0], vd.actualNs[0:0], nil) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + + v.pool.Put(vd) + + return +} + +// StructFiltered validates a struct's exposed fields, that pass the FilterFunc check and automatically validates +// nested structs, unless otherwise specified. +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +func (v *Validate) StructFiltered(s interface{}, fn FilterFunc) error { + return v.StructFilteredCtx(context.Background(), s, fn) +} + +// StructFilteredCtx validates a struct's exposed fields, that pass the FilterFunc check and automatically validates +// nested structs, unless otherwise specified and also allows passing of contextual validation information via +// context.Context +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors.
+func (v *Validate) StructFilteredCtx(ctx context.Context, s interface{}, fn FilterFunc) (err error) { + val := reflect.ValueOf(s) + top := val + + if val.Kind() == reflect.Ptr && !val.IsNil() { + val = val.Elem() + } + + if val.Kind() != reflect.Struct || val.Type() == timeType { + return &InvalidValidationError{Type: reflect.TypeOf(s)} + } + + // good to validate + vd := v.pool.Get().(*validate) + vd.top = top + vd.isPartial = true + vd.ffn = fn + // vd.hasExcludes = false // only need to reset in StructPartial and StructExcept + + vd.validateStruct(ctx, top, val, val.Type(), vd.ns[0:0], vd.actualNs[0:0], nil) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + + v.pool.Put(vd) + + return +} + +// StructPartial validates the fields passed in only, ignoring all others. +// Fields may be provided in a namespaced fashion relative to the struct provided +// eg. NestedStruct.Field or NestedArrayField[0].Struct.Name +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +func (v *Validate) StructPartial(s interface{}, fields ...string) error { + return v.StructPartialCtx(context.Background(), s, fields...) +} + +// StructPartialCtx validates the fields passed in only, ignoring all others and allows passing of contextual +// validation validation information via context.Context +// Fields may be provided in a namespaced fashion relative to the struct provided +// eg. NestedStruct.Field or NestedArrayField[0].Struct.Name +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +func (v *Validate) StructPartialCtx(ctx context.Context, s interface{}, fields ...string) (err error) { + val := reflect.ValueOf(s) + top := val + + if val.Kind() == reflect.Ptr && !val.IsNil() { + val = val.Elem() + } + + if val.Kind() != reflect.Struct || val.Type() == timeType { + return &InvalidValidationError{Type: reflect.TypeOf(s)} + } + + // good to validate + vd := v.pool.Get().(*validate) + vd.top = top + vd.isPartial = true + vd.ffn = nil + vd.hasExcludes = false + vd.includeExclude = make(map[string]struct{}) + + typ := val.Type() + name := typ.Name() + + for _, k := range fields { + + flds := strings.Split(k, namespaceSeparator) + if len(flds) > 0 { + + vd.misc = append(vd.misc[0:0], name...) + vd.misc = append(vd.misc, '.') + + for _, s := range flds { + + idx := strings.Index(s, leftBracket) + + if idx != -1 { + for idx != -1 { + vd.misc = append(vd.misc, s[:idx]...) + vd.includeExclude[string(vd.misc)] = struct{}{} + + idx2 := strings.Index(s, rightBracket) + idx2++ + vd.misc = append(vd.misc, s[idx:idx2]...) + vd.includeExclude[string(vd.misc)] = struct{}{} + s = s[idx2:] + idx = strings.Index(s, leftBracket) + } + } else { + + vd.misc = append(vd.misc, s...) + vd.includeExclude[string(vd.misc)] = struct{}{} + } + + vd.misc = append(vd.misc, '.') + } + } + } + + vd.validateStruct(ctx, top, val, typ, vd.ns[0:0], vd.actualNs[0:0], nil) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + + v.pool.Put(vd) + + return +} + +// StructExcept validates all fields except the ones passed in. +// Fields may be provided in a namespaced fashion relative to the struct provided +// i.e. 
NestedStruct.Field or NestedArrayField[0].Struct.Name +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +func (v *Validate) StructExcept(s interface{}, fields ...string) error { + return v.StructExceptCtx(context.Background(), s, fields...) +} + +// StructExceptCtx validates all fields except the ones passed in and allows passing of contextual +// validation validation information via context.Context +// Fields may be provided in a namespaced fashion relative to the struct provided +// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +func (v *Validate) StructExceptCtx(ctx context.Context, s interface{}, fields ...string) (err error) { + val := reflect.ValueOf(s) + top := val + + if val.Kind() == reflect.Ptr && !val.IsNil() { + val = val.Elem() + } + + if val.Kind() != reflect.Struct || val.Type() == timeType { + return &InvalidValidationError{Type: reflect.TypeOf(s)} + } + + // good to validate + vd := v.pool.Get().(*validate) + vd.top = top + vd.isPartial = true + vd.ffn = nil + vd.hasExcludes = true + vd.includeExclude = make(map[string]struct{}) + + typ := val.Type() + name := typ.Name() + + for _, key := range fields { + + vd.misc = vd.misc[0:0] + + if len(name) > 0 { + vd.misc = append(vd.misc, name...) + vd.misc = append(vd.misc, '.') + } + + vd.misc = append(vd.misc, key...) + vd.includeExclude[string(vd.misc)] = struct{}{} + } + + vd.validateStruct(ctx, top, val, typ, vd.ns[0:0], vd.actualNs[0:0], nil) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + + v.pool.Put(vd) + + return +} + +// Var validates a single variable using tag style validation. +// eg. +// var i int +// validate.Var(i, "gt=1,lt=10") +// +// WARNING: a struct can be passed for validation eg. time.Time is a struct or +// if you have a custom type and have registered a custom type handler, so must +// allow it; however unforeseen validations will occur if trying to validate a +// struct that is meant to be passed to 'validate.Struct' +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +// validate Array, Slice and maps fields which may contain more than one error +func (v *Validate) Var(field interface{}, tag string) error { + return v.VarCtx(context.Background(), field, tag) +} + +// VarCtx validates a single variable using tag style validation and allows passing of contextual +// validation validation information via context.Context. +// eg. +// var i int +// validate.Var(i, "gt=1,lt=10") +// +// WARNING: a struct can be passed for validation eg. time.Time is a struct or +// if you have a custom type and have registered a custom type handler, so must +// allow it; however unforeseen validations will occur if trying to validate a +// struct that is meant to be passed to 'validate.Struct' +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. 
err.(validator.ValidationErrors) to access the array of errors. +// validate Array, Slice and maps fields which may contain more than one error +func (v *Validate) VarCtx(ctx context.Context, field interface{}, tag string) (err error) { + if len(tag) == 0 || tag == skipValidationTag { + return nil + } + + ctag := v.fetchCacheTag(tag) + val := reflect.ValueOf(field) + vd := v.pool.Get().(*validate) + vd.top = val + vd.isPartial = false + vd.traverseField(ctx, val, val, vd.ns[0:0], vd.actualNs[0:0], defaultCField, ctag) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + v.pool.Put(vd) + return +} + +// VarWithValue validates a single variable, against another variable/field's value using tag style validation +// eg. +// s1 := "abcd" +// s2 := "abcd" +// validate.VarWithValue(s1, s2, "eqcsfield") // returns true +// +// WARNING: a struct can be passed for validation eg. time.Time is a struct or +// if you have a custom type and have registered a custom type handler, so must +// allow it; however unforeseen validations will occur if trying to validate a +// struct that is meant to be passed to 'validate.Struct' +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +// validate Array, Slice and maps fields which may contain more than one error +func (v *Validate) VarWithValue(field interface{}, other interface{}, tag string) error { + return v.VarWithValueCtx(context.Background(), field, other, tag) +} + +// VarWithValueCtx validates a single variable, against another variable/field's value using tag style validation and +// allows passing of contextual validation validation information via context.Context. +// eg. +// s1 := "abcd" +// s2 := "abcd" +// validate.VarWithValue(s1, s2, "eqcsfield") // returns true +// +// WARNING: a struct can be passed for validation eg. time.Time is a struct or +// if you have a custom type and have registered a custom type handler, so must +// allow it; however unforeseen validations will occur if trying to validate a +// struct that is meant to be passed to 'validate.Struct' +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +// validate Array, Slice and maps fields which may contain more than one error +func (v *Validate) VarWithValueCtx(ctx context.Context, field interface{}, other interface{}, tag string) (err error) { + if len(tag) == 0 || tag == skipValidationTag { + return nil + } + ctag := v.fetchCacheTag(tag) + otherVal := reflect.ValueOf(other) + vd := v.pool.Get().(*validate) + vd.top = otherVal + vd.isPartial = false + vd.traverseField(ctx, otherVal, reflect.ValueOf(field), vd.ns[0:0], vd.actualNs[0:0], defaultCField, ctag) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + v.pool.Put(vd) + return +} diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE index 1b1b1921e..0f646931a 100644 --- a/vendor/github.com/golang/protobuf/LICENSE +++ b/vendor/github.com/golang/protobuf/LICENSE @@ -1,7 +1,4 @@ -Go support for Protocol Buffers - Google's data interchange format - Copyright 2010 The Go Authors. All rights reserved. 
-https://github.com/golang/protobuf Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go new file mode 100644 index 000000000..e810e6fea --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/buffer.go @@ -0,0 +1,324 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "errors" + "fmt" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + WireVarint = 0 + WireFixed32 = 5 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 +) + +// EncodeVarint returns the varint encoded bytes of v. +func EncodeVarint(v uint64) []byte { + return protowire.AppendVarint(nil, v) +} + +// SizeVarint returns the length of the varint encoded bytes of v. +// This is equal to len(EncodeVarint(v)). +func SizeVarint(v uint64) int { + return protowire.SizeVarint(v) +} + +// DecodeVarint parses a varint encoded integer from b, +// returning the integer value and the length of the varint. +// It returns (0, 0) if there is a parse error. +func DecodeVarint(b []byte) (uint64, int) { + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, 0 + } + return v, n +} + +// Buffer is a buffer for encoding and decoding the protobuf wire format. +// It may be reused between invocations to reduce memory usage. +type Buffer struct { + buf []byte + idx int + deterministic bool +} + +// NewBuffer allocates a new Buffer initialized with buf, +// where the contents of buf are considered the unread portion of the buffer. +func NewBuffer(buf []byte) *Buffer { + return &Buffer{buf: buf} +} + +// SetDeterministic specifies whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. +func (b *Buffer) SetDeterministic(deterministic bool) { + b.deterministic = deterministic +} + +// SetBuf sets buf as the internal buffer, +// where the contents of buf are considered the unread portion of the buffer. +func (b *Buffer) SetBuf(buf []byte) { + b.buf = buf + b.idx = 0 +} + +// Reset clears the internal buffer of all written and unread data. +func (b *Buffer) Reset() { + b.buf = b.buf[:0] + b.idx = 0 +} + +// Bytes returns the internal buffer. 
+func (b *Buffer) Bytes() []byte { + return b.buf +} + +// Unread returns the unread portion of the buffer. +func (b *Buffer) Unread() []byte { + return b.buf[b.idx:] +} + +// Marshal appends the wire-format encoding of m to the buffer. +func (b *Buffer) Marshal(m Message) error { + var err error + b.buf, err = marshalAppend(b.buf, m, b.deterministic) + return err +} + +// Unmarshal parses the wire-format message in the buffer and +// places the decoded results in m. +// It does not reset m before unmarshaling. +func (b *Buffer) Unmarshal(m Message) error { + err := UnmarshalMerge(b.Unread(), m) + b.idx = len(b.buf) + return err +} + +type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields } + +func (m *unknownFields) String() string { panic("not implemented") } +func (m *unknownFields) Reset() { panic("not implemented") } +func (m *unknownFields) ProtoMessage() { panic("not implemented") } + +// DebugPrint dumps the encoded bytes of b with a header and footer including s +// to stdout. This is only intended for debugging. +func (*Buffer) DebugPrint(s string, b []byte) { + m := MessageReflect(new(unknownFields)) + m.SetUnknown(b) + b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface()) + fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s) +} + +// EncodeVarint appends an unsigned varint encoding to the buffer. +func (b *Buffer) EncodeVarint(v uint64) error { + b.buf = protowire.AppendVarint(b.buf, v) + return nil +} + +// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer. +func (b *Buffer) EncodeZigzag32(v uint64) error { + return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) +} + +// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer. +func (b *Buffer) EncodeZigzag64(v uint64) error { + return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63)))) +} + +// EncodeFixed32 appends a 32-bit little-endian integer to the buffer. +func (b *Buffer) EncodeFixed32(v uint64) error { + b.buf = protowire.AppendFixed32(b.buf, uint32(v)) + return nil +} + +// EncodeFixed64 appends a 64-bit little-endian integer to the buffer. +func (b *Buffer) EncodeFixed64(v uint64) error { + b.buf = protowire.AppendFixed64(b.buf, uint64(v)) + return nil +} + +// EncodeRawBytes appends a length-prefixed raw bytes to the buffer. +func (b *Buffer) EncodeRawBytes(v []byte) error { + b.buf = protowire.AppendBytes(b.buf, v) + return nil +} + +// EncodeStringBytes appends a length-prefixed raw bytes to the buffer. +// It does not validate whether v contains valid UTF-8. +func (b *Buffer) EncodeStringBytes(v string) error { + b.buf = protowire.AppendString(b.buf, v) + return nil +} + +// EncodeMessage appends a length-prefixed encoded message to the buffer. +func (b *Buffer) EncodeMessage(m Message) error { + var err error + b.buf = protowire.AppendVarint(b.buf, uint64(Size(m))) + b.buf, err = marshalAppend(b.buf, m, b.deterministic) + return err +} + +// DecodeVarint consumes an encoded unsigned varint from the buffer. +func (b *Buffer) DecodeVarint() (uint64, error) { + v, n := protowire.ConsumeVarint(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer. 
+func (b *Buffer) DecodeZigzag32() (uint64, error) { + v, err := b.DecodeVarint() + if err != nil { + return 0, err + } + return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil +} + +// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer. +func (b *Buffer) DecodeZigzag64() (uint64, error) { + v, err := b.DecodeVarint() + if err != nil { + return 0, err + } + return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil +} + +// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer. +func (b *Buffer) DecodeFixed32() (uint64, error) { + v, n := protowire.ConsumeFixed32(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer. +func (b *Buffer) DecodeFixed64() (uint64, error) { + v, n := protowire.ConsumeFixed64(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer. +// If alloc is specified, it returns a copy of the raw bytes +// rather than a sub-slice of the buffer. +func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) { + v, n := protowire.ConsumeBytes(b.buf[b.idx:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + b.idx += n + if alloc { + v = append([]byte(nil), v...) + } + return v, nil +} + +// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer. +// It does not validate whether the raw bytes contain valid UTF-8. +func (b *Buffer) DecodeStringBytes() (string, error) { + v, n := protowire.ConsumeString(b.buf[b.idx:]) + if n < 0 { + return "", protowire.ParseError(n) + } + b.idx += n + return v, nil +} + +// DecodeMessage consumes a length-prefixed message from the buffer. +// It does not reset m before unmarshaling. +func (b *Buffer) DecodeMessage(m Message) error { + v, err := b.DecodeRawBytes(false) + if err != nil { + return err + } + return UnmarshalMerge(v, m) +} + +// DecodeGroup consumes a message group from the buffer. +// It assumes that the start group marker has already been consumed and +// consumes all bytes until (and including) the end group marker. +// It does not reset m before unmarshaling. +func (b *Buffer) DecodeGroup(m Message) error { + v, n, err := consumeGroup(b.buf[b.idx:]) + if err != nil { + return err + } + b.idx += n + return UnmarshalMerge(v, m) +} + +// consumeGroup parses b until it finds an end group marker, returning +// the raw bytes of the message (excluding the end group marker) and +// the total length of the message (including the end group marker).
+func consumeGroup(b []byte) ([]byte, int, error) { + b0 := b + depth := 1 // assume this follows a start group marker + for { + _, wtyp, tagLen := protowire.ConsumeTag(b) + if tagLen < 0 { + return nil, 0, protowire.ParseError(tagLen) + } + b = b[tagLen:] + + var valLen int + switch wtyp { + case protowire.VarintType: + _, valLen = protowire.ConsumeVarint(b) + case protowire.Fixed32Type: + _, valLen = protowire.ConsumeFixed32(b) + case protowire.Fixed64Type: + _, valLen = protowire.ConsumeFixed64(b) + case protowire.BytesType: + _, valLen = protowire.ConsumeBytes(b) + case protowire.StartGroupType: + depth++ + case protowire.EndGroupType: + depth-- + default: + return nil, 0, errors.New("proto: cannot parse reserved wire type") + } + if valLen < 0 { + return nil, 0, protowire.ParseError(valLen) + } + b = b[valLen:] + + if depth == 0 { + return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go deleted file mode 100644 index 3cd3249f7..000000000 --- a/vendor/github.com/golang/protobuf/proto/clone.go +++ /dev/null @@ -1,253 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "fmt" - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(src Message) Message { - in := reflect.ValueOf(src) - if in.IsNil() { - return src - } - out := reflect.New(in.Type().Elem()) - dst := out.Interface().(Message) - Merge(dst, src) - return dst -} - -// Merger is the interface representing objects that can merge messages of the same type. -type Merger interface { - // Merge merges src into this message. - // Required and optional fields that are set in src will be set to that value in dst. - // Elements of repeated fields will be appended. 
- // - // Merge may panic if called with a different argument type than the receiver. - Merge(src Message) -} - -// generatedMerger is the custom merge method that generated protos will have. -// We must add this method since a generate Merge method will conflict with -// many existing protos that have a Merge data field already defined. -type generatedMerger interface { - XXX_Merge(src Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. -func Merge(dst, src Message) { - if m, ok := dst.(Merger); ok { - m.Merge(src) - return - } - - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) - } - if in.IsNil() { - return // Merge from nil src is a noop - } - if m, ok := dst.(generatedMerger); ok { - m.XXX_Merge(src) - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). -func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. - if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. 
- elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. - out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go deleted file mode 100644 index d9aa3c42d..000000000 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ /dev/null @@ -1,428 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. -var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - // x -= 0x80 << 63 // Always zero. 
- - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -// Unmarshal implementations should not clear the receiver. -// Any unmarshaled data should be merged into the receiver. -// Callers of Unmarshal that do not want to retain existing data -// should Reset the receiver before calling Unmarshal. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// newUnmarshaler is the interface representing objects that can -// unmarshal themselves. The semantics are identical to Unmarshaler. -// -// This exists to support protoc-gen-go generated messages. 
-// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newUnmarshaler interface { - XXX_Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. -func UnmarshalMerge(buf []byte, pb Message) error { - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -// StartGroup tag is already consumed. This function consumes -// EndGroup tag. -func (p *Buffer) DecodeGroup(pb Message) error { - b := p.buf[p.index:] - x, y := findEndGroup(b) - if x < 0 { - return io.ErrUnexpectedEOF - } - err := Unmarshal(b[:x], pb) - p.index += y - return err -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(newUnmarshaler); ok { - err := u.XXX_Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - // Slow workaround for messages that aren't Unmarshalers. - // This includes some hand-coded .pb.go files and - // bootstrap protos. - // TODO: fix all of those and then add Unmarshal to - // the Message interface. Then: - // The cast above and code below can be deleted. - // The old unmarshaler can be deleted. 
- // Clients can call Unmarshal directly (can already do that, actually). - var info InternalMessageInfo - err := info.Unmarshal(pb, p.buf[p.index:]) - p.index = len(p.buf) - return err -} diff --git a/vendor/github.com/golang/protobuf/proto/defaults.go b/vendor/github.com/golang/protobuf/proto/defaults.go new file mode 100644 index 000000000..d399bf069 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/defaults.go @@ -0,0 +1,63 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" +) + +// SetDefaults sets unpopulated scalar fields to their default values. +// Fields within a oneof are not set even if they have a default value. +// SetDefaults is recursively called upon any populated message fields. +func SetDefaults(m Message) { + if m != nil { + setDefaults(MessageReflect(m)) + } +} + +func setDefaults(m protoreflect.Message) { + fds := m.Descriptor().Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if !m.Has(fd) { + if fd.HasDefault() && fd.ContainingOneof() == nil { + v := fd.Default() + if fd.Kind() == protoreflect.BytesKind { + v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes + } + m.Set(fd, v) + } + continue + } + } + + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + switch { + // Handle singular message. + case fd.Cardinality() != protoreflect.Repeated: + if fd.Message() != nil { + setDefaults(m.Get(fd).Message()) + } + // Handle list of messages. + case fd.IsList(): + if fd.Message() != nil { + ls := m.Get(fd).List() + for i := 0; i < ls.Len(); i++ { + setDefaults(ls.Get(i).Message()) + } + } + // Handle map of messages. + case fd.IsMap(): + if fd.MapValue().Message() != nil { + ms := m.Get(fd).Map() + ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { + setDefaults(v.Message()) + return true + }) + } + } + return true + }) +} diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go new file mode 100644 index 000000000..e8db57e09 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/deprecated.go @@ -0,0 +1,113 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + + protoV2 "google.golang.org/protobuf/proto" +) + +var ( + // Deprecated: No longer returned. + ErrNil = errors.New("proto: Marshal called with nil") + + // Deprecated: No longer returned. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") + + // Deprecated: No longer returned. + ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") +) + +// Deprecated: Do not use. +type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } + +// Deprecated: Do not use. +func GetStats() Stats { return Stats{} } + +// Deprecated: Do not use. +func MarshalMessageSet(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: Do not use. +func UnmarshalMessageSet([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: Do not use. 
+func MarshalMessageSetJSON(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: Do not use. +func UnmarshalMessageSetJSON([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: Do not use. +func RegisterMessageSetType(Message, int32, string) {} + +// Deprecated: Do not use. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// Deprecated: Do not use. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// Deprecated: Do not use; this type existed for intenal-use only. +type InternalMessageInfo struct{} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) DiscardUnknown(m Message) { + DiscardUnknown(m) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) { + return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Merge(dst, src Message) { + protoV2.Merge(MessageV2(dst), MessageV2(src)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Size(m Message) int { + return protoV2.Size(MessageV2(m)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error { + return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m)) +} diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go index dea2617ce..2187e877f 100644 --- a/vendor/github.com/golang/protobuf/proto/discard.go +++ b/vendor/github.com/golang/protobuf/proto/discard.go @@ -1,48 +1,13 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
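// A minimal sketch (not part of the vendored sources) of the deprecated JSON enum
// helpers shown in deprecated.go above: UnmarshalJSONEnum accepts both the new string
// form and the old numeric form. The Status maps below are invented, standing in for
// the value<->name tables that protoc-gen-go generates for an enum.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	statusName := map[int32]string{0: "UNKNOWN", 1: "ACTIVE"}
	statusValue := map[string]int32{"UNKNOWN": 0, "ACTIVE": 1}

	fmt.Println(proto.EnumName(statusName, 1)) // "ACTIVE"

	v1, _ := proto.UnmarshalJSONEnum(statusValue, []byte(`"ACTIVE"`), "Status") // new style: string
	v2, _ := proto.UnmarshalJSONEnum(statusValue, []byte(`1`), "Status")        // old style: number
	fmt.Println(v1, v2) // 1 1
}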
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package proto import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" + "google.golang.org/protobuf/reflect/protoreflect" ) -type generatedDiscarder interface { - XXX_DiscardUnknown() -} - // DiscardUnknown recursively discards all unknown fields from this message // and all embedded messages. // @@ -51,300 +16,43 @@ type generatedDiscarder interface { // marshal to be able to produce a message that continues to have those // unrecognized fields. To avoid this, DiscardUnknown is used to // explicitly clear the unknown fields after unmarshaling. -// -// For proto2 messages, the unknown fields of message extensions are only -// discarded from messages that have been accessed via GetExtension. func DiscardUnknown(m Message) { - if m, ok := m.(generatedDiscarder); ok { - m.XXX_DiscardUnknown() - return - } - // TODO: Dynamically populate a InternalMessageInfo for legacy messages, - // but the master branch has no implementation for InternalMessageInfo, - // so it would be more work to replicate that approach. - discardLegacy(m) -} - -// DiscardUnknown recursively discards all unknown fields. -func (a *InternalMessageInfo) DiscardUnknown(m Message) { - di := atomicLoadDiscardInfo(&a.discard) - if di == nil { - di = getDiscardInfo(reflect.TypeOf(m).Elem()) - atomicStoreDiscardInfo(&a.discard, di) - } - di.discard(toPointer(&m)) -} - -type discardInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []discardFieldInfo - unrecognized field -} - -type discardFieldInfo struct { - field field // Offset of field, guaranteed to be valid - discard func(src pointer) -} - -var ( - discardInfoMap = map[reflect.Type]*discardInfo{} - discardInfoLock sync.Mutex -) - -func getDiscardInfo(t reflect.Type) *discardInfo { - discardInfoLock.Lock() - defer discardInfoLock.Unlock() - di := discardInfoMap[t] - if di == nil { - di = &discardInfo{typ: t} - discardInfoMap[t] = di + if m != nil { + discardUnknown(MessageReflect(m)) } - return di } -func (di *discardInfo) discard(src pointer) { - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&di.initialized) == 0 { - di.computeDiscardInfo() - } - - for _, fi := range di.fields { - sfp := src.offset(fi.field) - fi.discard(sfp) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { - // Ignore lock since DiscardUnknown is not concurrency safe. 
- emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - DiscardUnknown(m) +func discardUnknown(m protoreflect.Message) { + m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool { + switch { + // Handle singular message. + case fd.Cardinality() != protoreflect.Repeated: + if fd.Message() != nil { + discardUnknown(m.Get(fd).Message()) } - } - } - - if di.unrecognized.IsValid() { - *src.offset(di.unrecognized).toBytes() = nil - } -} - -func (di *discardInfo) computeDiscardInfo() { - di.lock.Lock() - defer di.lock.Unlock() - if di.initialized != 0 { - return - } - t := di.typ - n := t.NumField() - - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - dfi := discardFieldInfo{field: toField(&f)} - tf := f.Type - - // Unwrap tf to get its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) - case isSlice: // E.g., []*pb.T - di := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sps := src.getPointerSlice() - for _, sp := range sps { - if !sp.isNil() { - di.discard(sp) - } - } - } - default: // E.g., *pb.T - di := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sp := src.getPointer() - if !sp.isNil() { - di.discard(sp) - } + // Handle list of messages. + case fd.IsList(): + if fd.Message() != nil { + ls := m.Get(fd).List() + for i := 0; i < ls.Len(); i++ { + discardUnknown(ls.Get(i).Message()) } } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) - default: // E.g., map[K]V - if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) - dfi.discard = func(src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - DiscardUnknown(val.Interface().(Message)) - } - } - } else { - dfi.discard = func(pointer) {} // Noop - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) - default: // E.g., interface{} - // TODO: Make this faster? - dfi.discard = func(src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - DiscardUnknown(sv.Interface().(Message)) - } - } - } + // Handle map of messages. 
+ case fd.IsMap(): + if fd.MapValue().Message() != nil { + ms := m.Get(fd).Map() + ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { + discardUnknown(v.Message()) + return true + }) } - default: - continue - } - di.fields = append(di.fields, dfi) - } - - di.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - di.unrecognized = toField(&f) - } - - atomic.StoreInt32(&di.initialized, 1) -} - -func discardLegacy(m Message) { - v := reflect.ValueOf(m) - if v.Kind() != reflect.Ptr || v.IsNil() { - return - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return - } - t := v.Type() - - for i := 0; i < v.NumField(); i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue } - vf := v.Field(i) - tf := f.Type + return true + }) - // Unwrap tf to get its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) - case isSlice: // E.g., []*pb.T - for j := 0; j < vf.Len(); j++ { - discardLegacy(vf.Index(j).Interface().(Message)) - } - default: // E.g., *pb.T - discardLegacy(vf.Interface().(Message)) - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) - default: // E.g., map[K]V - tv := vf.Type().Elem() - if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) - for _, key := range vf.MapKeys() { - val := vf.MapIndex(key) - discardLegacy(val.Interface().(Message)) - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) - default: // E.g., test_proto.isCommunique_Union interface - if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { - vf = vf.Elem() // E.g., *test_proto.Communique_Msg - if !vf.IsNil() { - vf = vf.Elem() // E.g., test_proto.Communique_Msg - vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value - if vf.Kind() == reflect.Ptr { - discardLegacy(vf.Interface().(Message)) - } - } - } - } - } - } - - if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { - if vf.Type() != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - vf.Set(reflect.ValueOf([]byte(nil))) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(m); err == nil { - // Ignore lock since discardLegacy is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - discardLegacy(m) - } - } + // Discard unknown fields. 
+ if len(m.GetUnknown()) > 0 { + m.SetUnknown(nil) } } diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go deleted file mode 100644 index c27d35f86..000000000 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ /dev/null @@ -1,221 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "fmt" - "reflect" -) - -// RequiredNotSetError is the error returned if Marshal is called with -// a protocol buffer struct whose required fields have not -// all been initialized. It is also the error returned if Unmarshal is -// called with an encoded protocol buffer that does not include all the -// required fields. -// -// When printed, RequiredNotSetError reports the first unset required field in a -// message. If the field cannot be precisely determined, it is reported as -// "{Unknown}". -type RequiredNotSetError struct { - field string -} - -func (e *RequiredNotSetError) Error() string { - return fmt.Sprintf("proto: required field %q not set", e.field) -} - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. 
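// A minimal sketch (not part of the vendored sources) of the DiscardUnknown behaviour
// implemented above: unknown fields survive an unmarshal/marshal round trip until they
// are explicitly discarded. It uses the well-known wrapperspb type and fabricates an
// unknown field (number 1000) purely for illustration.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/encoding/protowire"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	m := wrapperspb.String("hello")

	// Simulate an unknown field (field 1000, varint 42) left over from unmarshaling.
	unk := protowire.AppendTag(nil, 1000, protowire.VarintType)
	unk = protowire.AppendVarint(unk, 42)
	m.ProtoReflect().SetUnknown(protoreflect.RawFields(unk))

	proto.DiscardUnknown(m)                         // recursively drops unknown fields from m
	fmt.Println(len(m.ProtoReflect().GetUnknown())) // 0
}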
- ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - switch { - case x < 1<<7: - return 1 - case x < 1<<14: - return 2 - case x < 1<<21: - return 3 - case x < 1<<28: - return 4 - case x < 1<<35: - return 5 - case x < 1<<42: - return 6 - case x < 1<<49: - return 7 - case x < 1<<56: - return 8 - case x < 1<<63: - return 9 - } - return 10 -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -// Marshaler is the interface representing objects that can marshal themselves. 
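// A minimal sketch (not part of the vendored sources) of the varint and zigzag wire
// formats documented in the deleted encode.go above; the equivalent low-level helpers
// now live in google.golang.org/protobuf/encoding/protowire.
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Varint encoding (int32, int64, uint32, uint64, bool, enum): 7 bits per byte,
	// with the high bit set on every byte except the last.
	b := protowire.AppendVarint(nil, 300)
	fmt.Printf("% x\n", b) // ac 02

	v, n := protowire.ConsumeVarint(b)
	fmt.Println(v, n) // 300 2

	// Zigzag encoding (sint32, sint64): small negative values stay small on the wire.
	fmt.Println(protowire.EncodeZigZag(-1)) // 1
	fmt.Println(protowire.DecodeZigZag(1))  // -1
}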
-type Marshaler interface { - Marshal() ([]byte, error) -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - siz := Size(pb) - p.EncodeVarint(uint64(siz)) - return p.Marshal(pb) -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go deleted file mode 100644 index d4db5a1c1..000000000 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ /dev/null @@ -1,300 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal. Note a "bytes" field, - although represented by []byte, is not a repeated field and the - rule for the scalar fields described above applies. - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. 
- - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Two map fields are equal iff their lengths are the same, - and they contain the same set of elements. Zero-length map - fields are equal. - - Every other combination of things are not equal. - -The return value is undefined if a and b are not protocol buffers. -*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. -func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - return bytes.Equal(u1, u2) -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. 
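// A minimal sketch (not part of the vendored sources) of basic message equality. The
// rules documented in the deleted equal.go above are now provided by proto.Equal,
// which is retained in the rewritten package and defers to the protobuf v2
// implementation; the well-known wrapperspb type is used purely for illustration.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	a := wrapperspb.String("hello")
	b := wrapperspb.String("hello")
	fmt.Println(proto.Equal(a, b)) // true: same type, corresponding fields equal

	b.Value = "world"
	fmt.Println(proto.Equal(a, b)) // false: a set scalar field differs
}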
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. -func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1, m2 := e1.value, e2.value - - if m1 == nil && m2 == nil { - // Both have only encoded form. - if bytes.Equal(e1.enc, e2.enc) { - continue - } - // The bytes are different, but the extensions might still be - // equal. We need to decode them to compare. - } - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - // If both have only encoded form and the bytes are the same, - // it is handled above. We get here when the bytes are different. - // We don't know how to decode it, so just compare them as byte - // slices. - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - return false - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go index 816a3b9d6..42fc120c9 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -1,543 +1,356 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package proto -/* - * Types and routines for supporting protocol buffer extensions. - */ - import ( "errors" "fmt" - "io" "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. -type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} +type ( + // ExtensionDesc represents an extension descriptor and + // is used to interact with an extension field in a message. + // + // Variables of this type are generated in code by protoc-gen-go. + ExtensionDesc = protoimpl.ExtensionInfo -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} + // ExtensionRange represents a range of message extensions. + // Used in code generated by protoc-gen-go. 
+ ExtensionRange = protoiface.ExtensionRangeV1 -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} + // Deprecated: Do not use; this is an internal type. + Extension = protoimpl.ExtensionFieldV1 -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} + // Deprecated: Do not use; this is an internal type. + XXX_InternalExtensions = protoimpl.ExtensionFields +) -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, error) { - switch p := p.(type) { - case extendableProto: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return p, nil - case extendableProtoV1: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return extensionAdapter{p}, nil - } - // Don't allocate a specific error containing %T: - // this is the hot path for Clone and MarshalText. - return nil, errNotExtendable -} +// ErrMissingExtension reports whether the extension was not present. +var ErrMissingExtension = errors.New("proto: missing extension") var errNotExtendable = errors.New("proto: not an extendable proto.Message") -func isNilPtr(x interface{}) bool { - v := reflect.ValueOf(x) - return v.Kind() == reflect.Ptr && v.IsNil() -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. -// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. - p *struct { - mu sync.Mutex - extensionMap map[int32]Extension +// HasExtension reports whether the extension field is present in m +// either as an explicitly populated field or as an unknown field. +func HasExtension(m Message, xt *ExtensionDesc) (has bool) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return false } -} -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension + // Check whether any populated known field matches the field number. + xtd := xt.TypeDescriptor() + if isValidExtension(mr.Descriptor(), xtd) { + has = mr.Has(xtd) + } else { + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + has = int32(fd.Number()) == xt.Field + return !has }) - e.p.extensionMap = make(map[int32]Extension) } - return e.p.extensionMap -} -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. 
-func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil + // Check whether any unknown field matches the field number. + for b := mr.GetUnknown(); !has && len(b) > 0; { + num, _, n := protowire.ConsumeField(b) + has = int32(num) == xt.Field + b = b[n:] } - return e.p.extensionMap, &e.p.mu -} - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - value interface{} - enc []byte + return has } -// SetRawExtension is for testing only. -func SetRawExtension(base Message, id int32, b []byte) { - epb, err := extendable(base) - if err != nil { +// ClearExtension removes the extension field from m +// either as an explicitly populated field or as an unknown field. +func ClearExtension(m Message, xt *ExtensionDesc) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { return } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} -// isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { + xtd := xt.TypeDescriptor() + if isValidExtension(mr.Descriptor(), xtd) { + mr.Clear(xtd) + } else { + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + if int32(fd.Number()) == xt.Field { + mr.Clear(fd) + return false + } return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. - if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. 
-type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - // TODO: Check types, field numbers, etc.? - epb, err := extendable(pb) - if err != nil { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false + }) } - mu.Lock() - _, ok := extmap[extension.Field] - mu.Unlock() - return ok + clearUnknown(mr, fieldNum(xt.Field)) } -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - epb, err := extendable(pb) - if err != nil { +// ClearAllExtensions clears all extensions from m. +// This includes populated fields and unknown fields in the extension range. +func ClearAllExtensions(m Message) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { return } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, extension.Field) + + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + if fd.IsExtension() { + mr.Clear(fd) + } + return true + }) + clearUnknown(mr, mr.Descriptor().ExtensionRanges()) } -// GetExtension retrieves a proto2 extended field from pb. +// GetExtension retrieves a proto2 extended field from m. // // If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), // then GetExtension parses the encoded field and returns a Go value of the specified type. // If the field is not present, then the default value is returned (if one is specified), // otherwise ErrMissingExtension is reported. // -// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes of the field extension. -func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - - if extension.ExtendedType != nil { - // can only check type if this is a complete descriptor - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err +// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes for the extension field. +func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return nil, errNotExtendable + } + + // Retrieve the unknown fields for this extension field. + var bo protoreflect.RawFields + for bi := mr.GetUnknown(); len(bi) > 0; { + num, _, n := protowire.ConsumeField(bi) + if int32(num) == xt.Field { + bo = append(bo, bi[:n]...) 
} + bi = bi[n:] } - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) + // For type incomplete descriptors, only retrieve the unknown fields. + if xt.ExtensionType == nil { + return []byte(bo), nil } - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") + // If the extension field only exists as unknown fields, unmarshal it. + // This is rarely done since proto.Unmarshal eagerly unmarshals extensions. + xtd := xt.TypeDescriptor() + if !isValidExtension(mr.Descriptor(), xtd) { + return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) + } + if !mr.Has(xtd) && len(bo) > 0 { + m2 := mr.New() + if err := (proto.UnmarshalOptions{ + Resolver: extensionResolver{xt}, + }.Unmarshal(bo, m2.Interface())); err != nil { + return nil, err + } + if m2.Has(xtd) { + mr.Set(xtd, m2.Get(xtd)) + clearUnknown(mr, fieldNum(xt.Field)) } - return e.value, nil } - if extension.ExtensionType == nil { - // incomplete descriptor - return e.enc, nil + // Check whether the message has the extension field set or a default. + var pv protoreflect.Value + switch { + case mr.Has(xtd): + pv = mr.Get(xtd) + case xtd.HasDefault(): + pv = xtd.Default() + default: + return nil, ErrMissingExtension } - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err + v := xt.InterfaceOf(pv) + rv := reflect.ValueOf(v) + if isScalarKind(rv.Kind()) { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = v - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return e.value, nil + return v, nil } -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - if extension.ExtensionType == nil { - // incomplete descriptor, so no default - return nil, ErrMissingExtension - } - - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) +// extensionResolver is a custom extension resolver that stores a single +// extension type that takes precedence over the global registry. +type extensionResolver struct{ xt protoreflect.ExtensionType } - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err +func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field { + return r.xt, nil } + return protoregistry.GlobalTypes.FindExtensionByName(field) +} - if sf == nil || sf.value == nil { - // There is no default value. 
- return nil, ErrMissingExtension +func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field { + return r.xt, nil } + return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) +} - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil +// GetExtensions returns a list of the extensions values present in m, +// corresponding with the provided list of extension descriptors, xts. +// If an extension is missing in m, the corresponding value is nil. +func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return nil, errNotExtendable } - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) + vs := make([]interface{}, len(xts)) + for i, xt := range xts { + v, err := GetExtension(m, xt) + if err != nil { + if err == ErrMissingExtension { + continue + } + return vs, err + } + vs[i] = v } - return value.Interface(), nil + return vs, nil } -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - unmarshal := typeUnmarshaler(t, extension.Tag) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate space to store the pointer/slice. - value := reflect.New(t).Elem() +// SetExtension sets an extension field in m to the provided value. +func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return errNotExtendable + } - var err error - for { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF + rv := reflect.ValueOf(v) + if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) { + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType) + } + if rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", v) } - b = b[n:] - wire := int(x) & 7 - - b, err = unmarshal(b, valToPointer(value.Addr()), wire) - if err != nil { - return nil, err + if isScalarKind(rv.Elem().Kind()) { + v = rv.Elem().Interface() } + } - if len(b) == 0 { - break - } + xtd := xt.TypeDescriptor() + if !isValidExtension(mr.Descriptor(), xtd) { + return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) } - return value.Interface(), nil + mr.Set(xtd, xt.ValueOf(v)) + clearUnknown(mr, fieldNum(xt.Field)) + return nil } -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. 
-func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, err := extendable(pb) - if err != nil { - return nil, err +// SetRawExtension inserts b into the unknown fields of m. +// +// Deprecated: Use Message.ProtoReflect.SetUnknown instead. +func SetRawExtension(m Message, fnum int32, b []byte) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return + + // Verify that the raw field is valid. + for b0 := b; len(b0) > 0; { + num, _, n := protowire.ConsumeField(b0) + if int32(num) != fnum { + panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum)) } + b0 = b0[n:] } - return -} -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. -func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - registeredExtensions := RegisteredExtensions(pb) + ClearExtension(m, &ExtensionDesc{Field: fnum}) + mr.SetUnknown(append(mr.GetUnknown(), b...)) +} - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil - } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} +// ExtensionDescs returns a list of extension descriptors found in m, +// containing descriptors for both populated extension fields in m and +// also unknown fields of m that are in the extension range. +// For the later case, an type incomplete descriptor is provided where only +// the ExtensionDesc.Field field is populated. +// The order of the extension descriptors is undefined. +func ExtensionDescs(m Message) ([]*ExtensionDesc, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return nil, errNotExtendable + } + + // Collect a set of known extension descriptors. + extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc) + mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + xt := fd.(protoreflect.ExtensionTypeDescriptor) + if xd, ok := xt.Type().(*ExtensionDesc); ok { + extDescs[fd.Number()] = xd } } - - extensions = append(extensions, desc) + return true + }) + + // Collect a set of unknown extension descriptors. + extRanges := mr.Descriptor().ExtensionRanges() + for b := mr.GetUnknown(); len(b) > 0; { + num, _, n := protowire.ConsumeField(b) + if extRanges.Has(num) && extDescs[num] == nil { + extDescs[num] = nil + } + b = b[n:] } - return extensions, nil -} -// SetExtension sets the specified extension of pb to the specified value. 
-func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - epb, err := extendable(pb) - if err != nil { - return err - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return errors.New("proto: bad extension value type") - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + // Transpose the set of descriptors into a list. + var xts []*ExtensionDesc + for num, xt := range extDescs { + if xt == nil { + xt = &ExtensionDesc{Field: int32(num)} + } + xts = append(xts, xt) } + return xts, nil +} - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: value} - return nil +// isValidExtension reports whether xtd is a valid extension descriptor for md. +func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool { + return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number()) } -// ClearAllExtensions clears all extensions from pb. -func ClearAllExtensions(pb Message) { - epb, err := extendable(pb) - if err != nil { - return - } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) +// isScalarKind reports whether k is a protobuf scalar kind (except bytes). +// This function exists for historical reasons since the representation of +// scalars differs between v1 and v2, where v1 uses *T and v2 uses T. +func isScalarKind(k reflect.Kind) bool { + switch k { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + return true + default: + return false } } -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. - -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m +// clearUnknown removes unknown fields from m where remover.Has reports true. +func clearUnknown(m protoreflect.Message, remover interface { + Has(protoreflect.FieldNumber) bool +}) { + var bo protoreflect.RawFields + for bi := m.GetUnknown(); len(bi) > 0; { + num, _, n := protowire.ConsumeField(bi) + if !remover.Has(num) { + bo = append(bo, bi[:n]...) + } + bi = bi[n:] } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + if bi := m.GetUnknown(); len(bi) != len(bo) { + m.SetUnknown(bo) } - m[desc.Field] = desc } -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. 
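The rewritten accessors above keep the legacy extension surface (SetExtension, GetExtension, ExtensionDescs, ClearExtension) while delegating descriptor validation and storage to protoreflect. A minimal usage sketch of that surface follows; the generated package pb, its message Outer, and the extension descriptor pb.E_Ext are hypothetical stand-ins for illustration only and are not part of this change.

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"

	pb "example.com/hypothetical/pb" // hypothetical generated package, assumed for this sketch
)

func main() {
	msg := &pb.Outer{}

	// Set a proto2 string extension; scalar values are passed as pointers (proto.String).
	if err := proto.SetExtension(msg, pb.E_Ext, proto.String("hello")); err != nil {
		log.Fatal(err)
	}

	// Read it back; GetExtension returns an interface{}, typically *string for a string extension.
	v, err := proto.GetExtension(msg, pb.E_Ext)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*v.(*string))

	// List descriptors for all extensions currently present in msg.
	descs, err := proto.ExtensionDescs(msg)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(descs))

	// Remove the extension again.
	proto.ClearExtension(msg, pb.E_Ext)
}

With this vendor bump, the same calls are validated against the message's extension ranges via isValidExtension and resolved through protoregistry rather than the old per-type extension maps deleted below.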
-func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] +type fieldNum protoreflect.FieldNumber + +func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool { + return protoreflect.FieldNumber(n1) == n2 } diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go deleted file mode 100644 index 0e2191b8a..000000000 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ /dev/null @@ -1,921 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. 
- - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. -Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/golang/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return 
m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m *Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/golang/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "errors" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string") - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// Stats records allocation details about the protocol buffer encoders -// and decoders. Useful for tuning the library itself. -type Stats struct { - Emalloc uint64 // mallocs in encode - Dmalloc uint64 // mallocs in decode - Encode uint64 // number of encodes - Decode uint64 // number of decodes - Chit uint64 // number of cache hits - Cmiss uint64 // number of cache misses - Size uint64 // number of sizes -} - -// Set to true to enable stats collection. -const collectStats = false - -var stats Stats - -// GetStats returns a copy of the global Stats structure. -func GetStats() Stats { return stats } - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - deterministic bool -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. 
-func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -// SetDeterministic sets whether to use deterministic serialization. -// -// Deterministic serialization guarantees that for a given binary, equal -// messages will always be serialized to the same bytes. This implies: -// -// - Repeated serialization of a message will return the same bytes. -// - Different processes of the same binary (which may be executing on -// different machines) will serialize equal messages to the same bytes. -// -// Note that the deterministic serialization is NOT canonical across -// languages. It is not guaranteed to remain stable over time. It is unstable -// across different builds with schema changes due to unknown fields. -// Users who need canonical serialization (e.g., persistent storage in a -// canonical form, fingerprinting, etc.) should define their own -// canonicalization specification and implement their own serializer rather -// than relying on this API. -// -// If deterministic serialization is requested, map entries will be sorted -// by keys in lexographical order. This is an implementation detail and -// subject to change. -func (p *Buffer) SetDeterministic(deterministic bool) { - p.deterministic = deterministic -} - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. 
-func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. -func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - index := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = index -} - -// SetDefaults sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a pointer to a struct. 
-func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. - switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or []*T or map[T]*T - switch f.Kind() { - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. -type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. 
-func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. -func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. - sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// mapKeys returns a sort.Interface to be used for sorting the map keys. -// Map fields may have key types of non-float scalars, strings and enums. 
-func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{vs: vs} - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. - if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - case reflect.Bool: - s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true - case reflect.String: - s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } - default: - panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. -func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -// ProtoPackageIsVersion2 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion2 = true - -// ProtoPackageIsVersion1 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package. -const ProtoPackageIsVersion1 = true - -// InternalMessageInfo is a type used internally by generated .pb.go files. -// This type is not intended to be used by non-generated code. -// This type is not subject to any compatibility guarantee. -type InternalMessageInfo struct { - marshal *marshalInfo - unmarshal *unmarshalInfo - merge *mergeInfo - discard *discardInfo -} diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go deleted file mode 100644 index 3b6ca41d5..000000000 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ /dev/null @@ -1,314 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
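The deleted lib.go above documented proto2's pointer-scalar convention, the helper constructors such as proto.String, declared field defaults, and deterministic marshaling via Buffer.SetDeterministic. A short sketch of how that documented API is used, reusing the Test message from the package documentation above; the generated import path is a hypothetical placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"

	pb "example.com/hypothetical/example" // hypothetical path for the generated test.pb.go shown above
)

func main() {
	// Optional scalar fields are pointers; helpers such as proto.String allocate them.
	msg := &pb.Test{
		Label: proto.String("hello"),
		Reps:  []int64{1, 2, 3},
	}

	// Type was left unset; SetDefaults fills it in with its declared default (77).
	proto.SetDefaults(msg)
	fmt.Println(msg.GetType())

	// Deterministic marshaling, as described by SetDeterministic above.
	var buf proto.Buffer
	buf.SetDeterministic(true)
	if err := buf.Marshal(msg); err != nil {
		log.Fatal("marshaling error: ", err)
	}
	fmt.Printf("encoded %d bytes\n", len(buf.Bytes()))
}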
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "reflect" - "sort" - "sync" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. -type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - return ms.find(pb) != nil -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. 
- -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// MarshalMessageSet encodes the extension map represented by m in the message set wire format. -// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSet(exts interface{}) ([]byte, error) { - return marshalMessageSet(exts, false) -} - -// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal. -func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) { - switch exts := exts.(type) { - case *XXX_InternalExtensions: - var u marshalInfo - siz := u.sizeMessageSet(exts) - b := make([]byte, 0, siz) - return u.appendMessageSet(b, exts, deterministic) - - case map[int32]Extension: - // This is an old-style extension map. - // Wrap it in a new-style XXX_InternalExtensions. - ie := XXX_InternalExtensions{ - p: &struct { - mu sync.Mutex - extensionMap map[int32]Extension - }{ - extensionMap: exts, - }, - } - - var u marshalInfo - siz := u.sizeMessageSet(&ie) - b := make([]byte, 0, siz) - return u.appendMessageSet(b, &ie, deterministic) - - default: - return nil, errors.New("proto: not an extension map") - } -} - -// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} - -// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. -// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - var mu sync.Locker - m, mu = exts.extensionsRead() - if m != nil { - // Keep the extensions map locked until we're done marshaling to prevent - // races between marshaling and unmarshaling the lazily-{en,de}coded - // values. - mu.Lock() - defer mu.Unlock() - } - case map[int32]Extension: - m = exts - default: - return nil, errors.New("proto: not an extension map") - } - var b bytes.Buffer - b.WriteByte('{') - - // Process the map in key order for deterministic output. 
- ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) // int32Slice defined in text.go - - for i, id := range ids { - ext := m[id] - msd, ok := messageSetMap[id] - if !ok { - // Unknown type; we can't render it, so skip it. - continue - } - - if i > 0 && b.Len() > 1 { - b.WriteByte(',') - } - - fmt.Fprintf(&b, `"[%s]":`, msd.name) - - x := ext.value - if x == nil { - x = reflect.New(msd.t.Elem()).Interface() - if err := Unmarshal(ext.enc, x.(Message)); err != nil { - return nil, err - } - } - d, err := json.Marshal(x) - if err != nil { - return nil, err - } - b.Write(d) - } - b.WriteByte('}') - return b.Bytes(), nil -} - -// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. -// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. -func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { - // Common-case fast path. - if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { - return nil - } - - // This is fairly tricky, and it's not clear that it is needed. - return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") -} - -// A global registry of types that can be used in a MessageSet. - -var messageSetMap = make(map[int32]messageSetDesc) - -type messageSetDesc struct { - t reflect.Type // pointer to struct - name string -} - -// RegisterMessageSetType is called from the generated code. -func RegisterMessageSetType(m Message, fieldNum int32, name string) { - messageSetMap[fieldNum] = messageSetDesc{ - t: reflect.TypeOf(m), - name: name, - } -} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go deleted file mode 100644 index b6cad9083..000000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,357 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" - "sync" -) - -const unsafeAllowed = false - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// zeroField is a noop when calling pointer.offset. -var zeroField = field([]int{}) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// The pointer type is for the table-driven decoder. -// The implementation here uses a reflect.Value of pointer type to -// create a generic pointer. In pointer_unsafe.go we use unsafe -// instead of reflect to implement the same (but faster) interface. -type pointer struct { - v reflect.Value -} - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - return pointer{v: reflect.ValueOf(*i)} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { - v := reflect.ValueOf(*i) - u := reflect.New(v.Type()) - u.Elem().Set(v) - return pointer{v: u} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{v: v} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} -} - -func (p pointer) isNil() bool { - return p.v.IsNil() -} - -// grow updates the slice s in place to make it one element longer. -// s must be addressable. -// Returns the (addressable) new element. -func grow(s reflect.Value) reflect.Value { - n, m := s.Len(), s.Cap() - if n < m { - s.SetLen(n + 1) - } else { - s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) - } - return s.Index(n) -} - -func (p pointer) toInt64() *int64 { - return p.v.Interface().(*int64) -} -func (p pointer) toInt64Ptr() **int64 { - return p.v.Interface().(**int64) -} -func (p pointer) toInt64Slice() *[]int64 { - return p.v.Interface().(*[]int64) -} - -var int32ptr = reflect.TypeOf((*int32)(nil)) - -func (p pointer) toInt32() *int32 { - return p.v.Convert(int32ptr).Interface().(*int32) -} - -// The toInt32Ptr/Slice methods don't work because of enums. -// Instead, we must use set/get methods for the int32ptr/slice case. 
-/* - func (p pointer) toInt32Ptr() **int32 { - return p.v.Interface().(**int32) -} - func (p pointer) toInt32Slice() *[]int32 { - return p.v.Interface().(*[]int32) -} -*/ -func (p pointer) getInt32Ptr() *int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().(*int32) - } - // an enum - return p.v.Elem().Convert(int32PtrType).Interface().(*int32) -} -func (p pointer) setInt32Ptr(v int32) { - // Allocate value in a *int32. Possibly convert that to a *enum. - // Then assign it to a **int32 or **enum. - // Note: we can convert *int32 to *enum, but we can't convert - // **int32 to **enum! - p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) -} - -// getInt32Slice copies []int32 from p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getInt32Slice() []int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().([]int32) - } - // an enum - // Allocate a []int32, then assign []enum's values into it. - // Note: we can't convert []enum to []int32. - slice := p.v.Elem() - s := make([]int32, slice.Len()) - for i := 0; i < slice.Len(); i++ { - s[i] = int32(slice.Index(i).Int()) - } - return s -} - -// setInt32Slice copies []int32 into p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setInt32Slice(v []int32) { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - p.v.Elem().Set(reflect.ValueOf(v)) - return - } - // an enum - // Allocate a []enum, then assign []int32's values into it. - // Note: we can't convert []enum to []int32. - slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) - for i, x := range v { - slice.Index(i).SetInt(int64(x)) - } - p.v.Elem().Set(slice) -} -func (p pointer) appendInt32Slice(v int32) { - grow(p.v.Elem()).SetInt(int64(v)) -} - -func (p pointer) toUint64() *uint64 { - return p.v.Interface().(*uint64) -} -func (p pointer) toUint64Ptr() **uint64 { - return p.v.Interface().(**uint64) -} -func (p pointer) toUint64Slice() *[]uint64 { - return p.v.Interface().(*[]uint64) -} -func (p pointer) toUint32() *uint32 { - return p.v.Interface().(*uint32) -} -func (p pointer) toUint32Ptr() **uint32 { - return p.v.Interface().(**uint32) -} -func (p pointer) toUint32Slice() *[]uint32 { - return p.v.Interface().(*[]uint32) -} -func (p pointer) toBool() *bool { - return p.v.Interface().(*bool) -} -func (p pointer) toBoolPtr() **bool { - return p.v.Interface().(**bool) -} -func (p pointer) toBoolSlice() *[]bool { - return p.v.Interface().(*[]bool) -} -func (p pointer) toFloat64() *float64 { - return p.v.Interface().(*float64) -} -func (p pointer) toFloat64Ptr() **float64 { - return p.v.Interface().(**float64) -} -func (p pointer) toFloat64Slice() *[]float64 { - return p.v.Interface().(*[]float64) -} -func (p pointer) toFloat32() *float32 { - return p.v.Interface().(*float32) -} -func (p pointer) toFloat32Ptr() **float32 { - return p.v.Interface().(**float32) -} -func (p pointer) toFloat32Slice() *[]float32 { - return p.v.Interface().(*[]float32) -} -func (p pointer) toString() *string { - return p.v.Interface().(*string) -} -func (p pointer) toStringPtr() **string { - return p.v.Interface().(**string) -} -func (p pointer) toStringSlice() *[]string { - return p.v.Interface().(*[]string) -} -func (p pointer) toBytes() *[]byte { - return p.v.Interface().(*[]byte) -} -func (p pointer) toBytesSlice() *[][]byte { - 
return p.v.Interface().(*[][]byte) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return p.v.Interface().(*XXX_InternalExtensions) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return p.v.Interface().(*map[int32]Extension) -} -func (p pointer) getPointer() pointer { - return pointer{v: p.v.Elem()} -} -func (p pointer) setPointer(q pointer) { - p.v.Elem().Set(q.v) -} -func (p pointer) appendPointer(q pointer) { - grow(p.v.Elem()).Set(q.v) -} - -// getPointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getPointerSlice() []pointer { - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// setPointerSlice copies []pointer into p as a new []*T. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setPointerSlice(v []pointer) { - if v == nil { - p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) - return - } - s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) - for _, p := range v { - s = reflect.Append(s, p.v) - } - p.v.Elem().Set(s) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - if p.v.Elem().IsNil() { - return pointer{v: p.v.Elem()} - } - return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct -} - -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - // TODO: check that p.v.Type().Elem() == t? - return p.v -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} - -var atomicLock sync.Mutex diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index d55a335d9..000000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,308 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "sync/atomic" - "unsafe" -) - -const unsafeAllowed = true - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// zeroField is a noop when calling pointer.offset. -const zeroField = field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != invalidField -} - -// The pointer type below is for the new table-driven encoder/decoder. -// The implementation here uses unsafe.Pointer to create a generic pointer. -// In pointer_reflect.go we use reflect instead of unsafe to implement -// the same (but slower) interface. -type pointer struct { - p unsafe.Pointer -} - -// size of pointer -var ptrSize = unsafe.Sizeof(uintptr(0)) - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - // Super-tricky - read pointer out of data word of interface value. - // Saves ~25ns over the equivalent: - // return valToPointer(reflect.ValueOf(*i)) - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { - // Super-tricky - read or get the address of data word of interface value. - if isptr { - // The interface is of pointer type, thus it is a direct interface. - // The data word is the pointer data itself. We take its address. - return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} - } - // The interface is not of pointer type. The data word is the pointer - // to the data. - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// valToPointer converts v to a pointer. v must be of pointer type. 
-func valToPointer(v reflect.Value) pointer { - return pointer{p: unsafe.Pointer(v.Pointer())} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - // For safety, we should panic if !f.IsValid, however calling panic causes - // this to no longer be inlineable, which is a serious performance cost. - /* - if !f.IsValid() { - panic("invalid field") - } - */ - return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} -} - -func (p pointer) isNil() bool { - return p.p == nil -} - -func (p pointer) toInt64() *int64 { - return (*int64)(p.p) -} -func (p pointer) toInt64Ptr() **int64 { - return (**int64)(p.p) -} -func (p pointer) toInt64Slice() *[]int64 { - return (*[]int64)(p.p) -} -func (p pointer) toInt32() *int32 { - return (*int32)(p.p) -} - -// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. -/* - func (p pointer) toInt32Ptr() **int32 { - return (**int32)(p.p) - } - func (p pointer) toInt32Slice() *[]int32 { - return (*[]int32)(p.p) - } -*/ -func (p pointer) getInt32Ptr() *int32 { - return *(**int32)(p.p) -} -func (p pointer) setInt32Ptr(v int32) { - *(**int32)(p.p) = &v -} - -// getInt32Slice loads a []int32 from p. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getInt32Slice() []int32 { - return *(*[]int32)(p.p) -} - -// setInt32Slice stores a []int32 to p. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setInt32Slice(v []int32) { - *(*[]int32)(p.p) = v -} - -// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? -func (p pointer) appendInt32Slice(v int32) { - s := (*[]int32)(p.p) - *s = append(*s, v) -} - -func (p pointer) toUint64() *uint64 { - return (*uint64)(p.p) -} -func (p pointer) toUint64Ptr() **uint64 { - return (**uint64)(p.p) -} -func (p pointer) toUint64Slice() *[]uint64 { - return (*[]uint64)(p.p) -} -func (p pointer) toUint32() *uint32 { - return (*uint32)(p.p) -} -func (p pointer) toUint32Ptr() **uint32 { - return (**uint32)(p.p) -} -func (p pointer) toUint32Slice() *[]uint32 { - return (*[]uint32)(p.p) -} -func (p pointer) toBool() *bool { - return (*bool)(p.p) -} -func (p pointer) toBoolPtr() **bool { - return (**bool)(p.p) -} -func (p pointer) toBoolSlice() *[]bool { - return (*[]bool)(p.p) -} -func (p pointer) toFloat64() *float64 { - return (*float64)(p.p) -} -func (p pointer) toFloat64Ptr() **float64 { - return (**float64)(p.p) -} -func (p pointer) toFloat64Slice() *[]float64 { - return (*[]float64)(p.p) -} -func (p pointer) toFloat32() *float32 { - return (*float32)(p.p) -} -func (p pointer) toFloat32Ptr() **float32 { - return (**float32)(p.p) -} -func (p pointer) toFloat32Slice() *[]float32 { - return (*[]float32)(p.p) -} -func (p pointer) toString() *string { - return (*string)(p.p) -} -func (p pointer) toStringPtr() **string { - return (**string)(p.p) -} -func (p pointer) toStringSlice() *[]string { - return (*[]string)(p.p) -} -func (p pointer) toBytes() *[]byte { - return (*[]byte)(p.p) -} -func (p pointer) toBytesSlice() *[][]byte { - return (*[][]byte)(p.p) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(p.p) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return (*map[int32]Extension)(p.p) -} - -// getPointerSlice loads []*T from p as a []pointer. 
-// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getPointerSlice() []pointer { - // Super-tricky - p should point to a []*T where T is a - // message type. We load it as []pointer. - return *(*[]pointer)(p.p) -} - -// setPointerSlice stores []pointer into p as a []*T. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setPointerSlice(v []pointer) { - // Super-tricky - p should point to a []*T where T is a - // message type. We store it as []pointer. - *(*[]pointer)(p.p) = v -} - -// getPointer loads the pointer at p and returns it. -func (p pointer) getPointer() pointer { - return pointer{p: *(*unsafe.Pointer)(p.p)} -} - -// setPointer stores the pointer q at p. -func (p pointer) setPointer(q pointer) { - *(*unsafe.Pointer)(p.p) = q.p -} - -// append q to the slice pointed to by p. -func (p pointer) appendPointer(q pointer) { - s := (*[]unsafe.Pointer)(p.p) - *s = append(*s, q.p) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - // Super-tricky - read pointer out of data word of interface value. - return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} -} - -// asPointerTo returns a reflect.Value that is a pointer to an -// object of type t stored at p. -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - return reflect.NewAt(t, p.p) -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index f710adab0..dcdc2202f 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -1,163 +1,104 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package proto -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - import ( "fmt" - "log" - "os" "reflect" - "sort" "strconv" "strings" "sync" -) - -const debug bool = false -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoimpl" ) -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. -const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. +// StructProperties represents protocol buffer type information for a +// generated protobuf message in the open-struct API. +// +// Deprecated: Do not use. type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order + // Prop are the properties for each field. 
+ // + // Fields belonging to a oneof are stored in OneofTypes instead, with a + // single Properties representing the parent oneof held here. + // + // The order of Prop matches the order of fields in the Go struct. + // Struct fields that are not related to protobufs have a "XXX_" prefix + // in the Properties.Name and must be ignored by the user. + Prop []*Properties // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. + // It is keyed by the protobuf field name. OneofTypes map[string]*OneofProperties } -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. +// Properties represents the type information for a protobuf message field. +// +// Deprecated: Do not use. type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string + // Name is a placeholder name with little meaningful semantic value. + // If the name has an "XXX_" prefix, the entire Properties must be ignored. + Name string + // OrigName is the protobuf field name or oneof name. + OrigName string + // JSONName is the JSON name for the protobuf field. + JSONName string + // Enum is a placeholder name for enums. + // For historical reasons, this is neither the Go name for the enum, + // nor the protobuf name for the enum. + Enum string // Deprecated: Do not use. + // Weak contains the full name of the weakly referenced message. + Weak string + // Wire is a string representation of the wire type. + Wire string + // WireType is the protobuf wire type for the field. WireType int - Tag int + // Tag is the protobuf field number. + Tag int + // Required reports whether this is a required field. Required bool + // Optional reports whether this is a optional field. Optional bool + // Repeated reports whether this is a repeated field. Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field; set for []byte only - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only + // Packed reports whether this is a packed repeated field of scalars. + Packed bool + // Proto3 reports whether this field operates under the proto3 syntax. + Proto3 bool + // Oneof reports whether this field belongs within a oneof. + Oneof bool + + // Default is the default value in string form. + Default string + // HasDefault reports whether the field has a default value. 
+ HasDefault bool + + // MapKeyProp is the properties for the key field for a map field. + MapKeyProp *Properties + // MapValProp is the properties for the value field for a map field. + MapValProp *Properties +} - mtype reflect.Type // set for map types only - mkeyprop *Properties // set for map types only - mvalprop *Properties // set for map types only +// OneofProperties represents the type information for a protobuf oneof. +// +// Deprecated: Do not use. +type OneofProperties struct { + // Type is a pointer to the generated wrapper type for the field value. + // This is nil for messages that are not in the open-struct API. + Type reflect.Type + // Field is the index into StructProperties.Prop for the containing oneof. + Field int + // Prop is the properties for the field. + Prop *Properties } // String formats the properties in the protobuf struct field tag style. func (p *Properties) String() string { s := p.Wire - s += "," - s += strconv.Itoa(p.Tag) + s += "," + strconv.Itoa(p.Tag) if p.Required { s += ",req" } @@ -171,18 +112,21 @@ func (p *Properties) String() string { s += ",packed" } s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { + if p.JSONName != "" { s += ",json=" + p.JSONName } - if p.proto3 { + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if len(p.Weak) > 0 { + s += ",weak=" + p.Weak + } + if p.Proto3 { s += ",proto3" } - if p.oneof { + if p.Oneof { s += ",oneof" } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } if p.HasDefault { s += ",def=" + p.Default } @@ -190,355 +134,173 @@ func (p *Properties) String() string { } // Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - case "fixed32": - p.WireType = WireFixed32 - case "fixed64": - p.WireType = WireFixed64 - case "zigzag32": - p.WireType = WireVarint - case "zigzag64": - p.WireType = WireVarint - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - -outer: - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": +func (p *Properties) Parse(tag string) { + // For example: "bytes,49,opt,name=foo,def=hello!" 
+ for len(tag) > 0 { + i := strings.IndexByte(tag, ',') + if i < 0 { + i = len(tag) + } + switch s := tag[:i]; { + case strings.HasPrefix(s, "name="): + p.OrigName = s[len("name="):] + case strings.HasPrefix(s, "json="): + p.JSONName = s[len("json="):] + case strings.HasPrefix(s, "enum="): + p.Enum = s[len("enum="):] + case strings.HasPrefix(s, "weak="): + p.Weak = s[len("weak="):] + case strings.Trim(s, "0123456789") == "": + n, _ := strconv.ParseUint(s, 10, 32) + p.Tag = int(n) + case s == "opt": p.Optional = true - case f == "rep": + case s == "req": + p.Required = true + case s == "rep": p.Repeated = true - case f == "packed": + case s == "varint" || s == "zigzag32" || s == "zigzag64": + p.Wire = s + p.WireType = WireVarint + case s == "fixed32": + p.Wire = s + p.WireType = WireFixed32 + case s == "fixed64": + p.Wire = s + p.WireType = WireFixed64 + case s == "bytes": + p.Wire = s + p.WireType = WireBytes + case s == "group": + p.Wire = s + p.WireType = WireStartGroup + case s == "packed": p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): + case s == "proto3": + p.Proto3 = true + case s == "oneof": + p.Oneof = true + case strings.HasPrefix(s, "def="): + // The default tag is special in that everything afterwards is the + // default regardless of the presence of commas. p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break outer - } - } - } -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// setFieldProps initializes the field properties for submessages and maps. -func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - switch t1 := typ; t1.Kind() { - case reflect.Ptr: - if t1.Elem().Kind() == reflect.Struct { - p.stype = t1.Elem() - } - - case reflect.Slice: - if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { - p.stype = t2.Elem() - } - - case reflect.Map: - p.mtype = t1 - p.mkeyprop = &Properties{} - p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.mvalprop = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) + p.Default, i = tag[len("def="):], len(tag) } + tag = strings.TrimPrefix(tag[i:], ",") } } -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() -) - // Init populates the properties from a protocol buffer struct tag. +// +// Deprecated: Do not use. func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" 
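Both the old and the new Parse accept the comma-separated "protobuf" struct tag emitted by protoc-gen-go. A small usage sketch of the exported Parse, using the example tag from the comment above:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	p := new(proto.Properties)
	p.Parse("bytes,49,opt,name=foo,def=hello!")

	// Tag=49, Wire="bytes", Optional=true, OrigName="foo",
	// HasDefault=true, Default="hello!".
	fmt.Println(p.Tag, p.Wire, p.Optional, p.OrigName, p.HasDefault, p.Default)
}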
p.Name = name p.OrigName = name if tag == "" { return } p.Parse(tag) - p.setFieldProps(typ, f, lockGetProp) + + if typ != nil && typ.Kind() == reflect.Map { + p.MapKeyProp = new(Properties) + p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil) + p.MapValProp = new(Properties) + p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil) + } } -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) +var propertiesCache sync.Map // map[reflect.Type]*StructProperties -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. +// GetProperties returns the list of properties for the type represented by t, +// which must be a generated protocol buffer message in the open-struct API, +// where protobuf message fields are represented by exported Go struct fields. +// +// Deprecated: Use protobuf reflection instead. func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - if collectStats { - stats.Chit++ - } - return sprop + if p, ok := propertiesCache.Load(t); ok { + return p.(*StructProperties) } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop + p, _ := propertiesCache.LoadOrStore(t, newProperties(t)) + return p.(*StructProperties) } -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - if collectStats { - stats.Chit++ - } - return prop - } - if collectStats { - stats.Cmiss++ +func newProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) } + var hasOneof bool prop := new(StructProperties) - // in case of recursive protos, fill this in now. - propertiesMap[t] = prop - - // build properties - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) + // Construct a list of properties for each field in the struct. for i := 0; i < t.NumField(); i++ { - f := t.Field(i) p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + f := t.Field(i) + tagField := f.Tag.Get("protobuf") + p.Init(f.Type, f.Name, tagField, &f) - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - // Oneof fields don't use the traditional protobuf tag. - p.OrigName = oneof - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") + tagOneof := f.Tag.Get("protobuf_oneof") + if tagOneof != "" { + hasOneof = true + p.OrigName = tagOneof } - } - // Re-order prop.order. - sort.Sort(prop) + // Rename unrelated struct fields with the "XXX_" prefix since so much + // user code simply checks for this to exclude special fields. 
+ if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") { + p.Name = "XXX_" + p.Name + p.OrigName = "XXX_" + p.OrigName + } else if p.Weak != "" { + p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field + } - type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + prop.Prop = append(prop.Prop, p) } - if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { - var oots []interface{} - _, _, _, oots = om.XXX_OneofFuncs() - // Interpret oneof metadata. + // Construct a mapping of oneof field names to properties. + if hasOneof { + var oneofWrappers []interface{} + if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { + oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{}) + } + if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { + oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{}) + } + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok { + if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok { + oneofWrappers = m.ProtoMessageInfo().OneofWrappers + } + } + prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T + for _, wrapper := range oneofWrappers { + p := &OneofProperties{ + Type: reflect.ValueOf(wrapper).Type(), // *T Prop: new(Properties), } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue + f := p.Type.Elem().Field(0) + p.Prop.Name = f.Name + p.Prop.Parse(f.Tag.Get("protobuf")) + + // Determine the struct field that contains this oneof. + // Each wrapper is assignable to exactly one parent field. + var foundOneof bool + for i := 0; i < t.NumField() && !foundOneof; i++ { + if p.Type.AssignableTo(t.Field(i).Type) { + p.Field = i + foundOneof = true } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ + if !foundOneof { + panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) + } + prop.OneofTypes[p.Prop.OrigName] = p } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i } - prop.reqCount = reqCount return prop } -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. 
-func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). -var ( - protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers - protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypedNils[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { - // Generated code always calls RegisterType with nil x. - // This check is just for extra safety. - protoTypedNils[name] = x - } else { - protoTypedNils[name] = reflect.Zero(t).Interface().(Message) - } - revProtoTypes[t] = name -} - -// RegisterMapType is called from generated code and maps from the fully qualified -// proto name to the native map type of the proto map definition. -func RegisterMapType(x interface{}, name string) { - if reflect.TypeOf(x).Kind() != reflect.Map { - panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) - } - if _, ok := protoMapTypes[name]; ok { - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoMapTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -// The type is not guaranteed to implement proto.Message if the name refers to a -// map entry. -func MessageType(name string) reflect.Type { - if t, ok := protoTypedNils[name]; ok { - return reflect.TypeOf(t) - } - return protoMapTypes[name] -} - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. 
-func FileDescriptor(filename string) []byte { return protoFiles[filename] } +func (sp *StructProperties) Len() int { return len(sp.Prop) } +func (sp *StructProperties) Less(i, j int) bool { return false } +func (sp *StructProperties) Swap(i, j int) { return } diff --git a/vendor/github.com/golang/protobuf/proto/proto.go b/vendor/github.com/golang/protobuf/proto/proto.go new file mode 100644 index 000000000..5aee89c32 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/proto.go @@ -0,0 +1,167 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proto provides functionality for handling protocol buffer messages. +// In particular, it provides marshaling and unmarshaling between a protobuf +// message and the binary wire format. +// +// See https://developers.google.com/protocol-buffers/docs/gotutorial for +// more information. +// +// Deprecated: Use the "google.golang.org/protobuf/proto" package instead. +package proto + +import ( + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + ProtoPackageIsVersion1 = true + ProtoPackageIsVersion2 = true + ProtoPackageIsVersion3 = true + ProtoPackageIsVersion4 = true +) + +// GeneratedEnum is any enum type generated by protoc-gen-go +// which is a named int32 kind. +// This type exists for documentation purposes. +type GeneratedEnum interface{} + +// GeneratedMessage is any message type generated by protoc-gen-go +// which is a pointer to a named struct kind. +// This type exists for documentation purposes. +type GeneratedMessage interface{} + +// Message is a protocol buffer message. +// +// This is the v1 version of the message interface and is marginally better +// than an empty interface as it lacks any method to programatically interact +// with the contents of the message. +// +// A v2 message is declared in "google.golang.org/protobuf/proto".Message and +// exposes protobuf reflection as a first-class feature of the interface. +// +// To convert a v1 message to a v2 message, use the MessageV2 function. +// To convert a v2 message to a v1 message, use the MessageV1 function. +type Message = protoiface.MessageV1 + +// MessageV1 converts either a v1 or v2 message to a v1 message. +// It returns nil if m is nil. +func MessageV1(m GeneratedMessage) protoiface.MessageV1 { + return protoimpl.X.ProtoMessageV1Of(m) +} + +// MessageV2 converts either a v1 or v2 message to a v2 message. +// It returns nil if m is nil. +func MessageV2(m GeneratedMessage) protoV2.Message { + return protoimpl.X.ProtoMessageV2Of(m) +} + +// MessageReflect returns a reflective view for a message. +// It returns nil if m is nil. +func MessageReflect(m Message) protoreflect.Message { + return protoimpl.X.MessageOf(m) +} + +// Marshaler is implemented by messages that can marshal themselves. +// This interface is used by the following functions: Size, Marshal, +// Buffer.Marshal, and Buffer.EncodeMessage. +// +// Deprecated: Do not implement. +type Marshaler interface { + // Marshal formats the encoded bytes of the message. + // It should be deterministic and emit valid protobuf wire data. + // The caller takes ownership of the returned buffer. + Marshal() ([]byte, error) +} + +// Unmarshaler is implemented by messages that can unmarshal themselves. 
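The new proto.go is a thin shim over the v2 API: MessageV1 and MessageV2 convert between the two message interfaces, and helpers such as Clone and Equal simply delegate to google.golang.org/protobuf/proto. A small sketch, assuming the well-known emptypb type is available as the example message:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	m := new(emptypb.Empty) // a v2-generated message; it also satisfies the v1 interface

	v1 := proto.MessageV1(m)  // view it through the v1 Message interface
	v2 := proto.MessageV2(v1) // and convert back to a v2 message

	// Clone and Equal delegate to the v2 implementation under the hood.
	c := proto.Clone(v1)
	fmt.Println(proto.Equal(v1, c), v2 != nil) // true true
}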
+// This interface is used by the following functions: Unmarshal, UnmarshalMerge, +// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup. +// +// Deprecated: Do not implement. +type Unmarshaler interface { + // Unmarshal parses the encoded bytes of the protobuf wire input. + // The provided buffer is only valid for during method call. + // It should not reset the receiver message. + Unmarshal([]byte) error +} + +// Merger is implemented by messages that can merge themselves. +// This interface is used by the following functions: Clone and Merge. +// +// Deprecated: Do not implement. +type Merger interface { + // Merge merges the contents of src into the receiver message. + // It clones all data structures in src such that it aliases no mutable + // memory referenced by src. + Merge(src Message) +} + +// RequiredNotSetError is an error type returned when +// marshaling or unmarshaling a message with missing required fields. +type RequiredNotSetError struct { + err error +} + +func (e *RequiredNotSetError) Error() string { + if e.err != nil { + return e.err.Error() + } + return "proto: required field not set" +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +func checkRequiredNotSet(m protoV2.Message) error { + if err := protoV2.CheckInitialized(m); err != nil { + return &RequiredNotSetError{err: err} + } + return nil +} + +// Clone returns a deep copy of src. +func Clone(src Message) Message { + return MessageV1(protoV2.Clone(MessageV2(src))) +} + +// Merge merges src into dst, which must be messages of the same type. +// +// Populated scalar fields in src are copied to dst, while populated +// singular messages in src are merged into dst by recursively calling Merge. +// The elements of every list field in src is appended to the corresponded +// list fields in dst. The entries of every map field in src is copied into +// the corresponding map field in dst, possibly replacing existing entries. +// The unknown fields of src are appended to the unknown fields of dst. +func Merge(dst, src Message) { + protoV2.Merge(MessageV2(dst), MessageV2(src)) +} + +// Equal reports whether two messages are equal. +// If two messages marshal to the same bytes under deterministic serialization, +// then Equal is guaranteed to report true. +// +// Two messages are equal if they are the same protobuf message type, +// have the same set of populated known and extension field values, +// and the same set of unknown fields values. +// +// Scalar values are compared with the equivalent of the == operator in Go, +// except bytes values which are compared using bytes.Equal and +// floating point values which specially treat NaNs as equal. +// Message values are compared by recursively calling Equal. +// Lists are equal if each element value is also equal. +// Maps are equal if they have the same set of keys, where the pair of values +// for each key is also equal. +func Equal(x, y Message) bool { + return protoV2.Equal(MessageV2(x), MessageV2(y)) +} + +func isMessageSet(md protoreflect.MessageDescriptor) bool { + ms, ok := md.(interface{ IsMessageSet() bool }) + return ok && ms.IsMessageSet() +} diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go new file mode 100644 index 000000000..1e7ff6420 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/registry.go @@ -0,0 +1,323 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + "reflect" + "strings" + "sync" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// filePath is the path to the proto source file. +type filePath = string // e.g., "google/protobuf/descriptor.proto" + +// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto. +type fileDescGZIP = []byte + +var fileCache sync.Map // map[filePath]fileDescGZIP + +// RegisterFile is called from generated code to register the compressed +// FileDescriptorProto with the file path for a proto source file. +// +// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead. +func RegisterFile(s filePath, d fileDescGZIP) { + // Decompress the descriptor. + zr, err := gzip.NewReader(bytes.NewReader(d)) + if err != nil { + panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) + } + b, err := ioutil.ReadAll(zr) + if err != nil { + panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) + } + + // Construct a protoreflect.FileDescriptor from the raw descriptor. + // Note that DescBuilder.Build automatically registers the constructed + // file descriptor with the v2 registry. + protoimpl.DescBuilder{RawDescriptor: b}.Build() + + // Locally cache the raw descriptor form for the file. + fileCache.Store(s, d) +} + +// FileDescriptor returns the compressed FileDescriptorProto given the file path +// for a proto source file. It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead. +func FileDescriptor(s filePath) fileDescGZIP { + if v, ok := fileCache.Load(s); ok { + return v.(fileDescGZIP) + } + + // Find the descriptor in the v2 registry. + var b []byte + if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil { + if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok { + b = fd.ProtoLegacyRawDesc() + } else { + // TODO: Use protodesc.ToFileDescriptorProto to construct + // a descriptorpb.FileDescriptorProto and marshal it. + // However, doing so causes the proto package to have a dependency + // on descriptorpb, leading to cyclic dependency issues. + } + } + + // Locally cache the raw descriptor form for the file. + if len(b) > 0 { + v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b)) + return v.(fileDescGZIP) + } + return nil +} + +// enumName is the name of an enum. For historical reasons, the enum name is +// neither the full Go name nor the full protobuf name of the enum. +// The name is the dot-separated combination of just the proto package that the +// enum is declared within followed by the Go type name of the generated enum. +type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum" + +// enumsByName maps enum values by name to their numeric counterpart. +type enumsByName = map[string]int32 + +// enumsByNumber maps enum values by number to their name counterpart. +type enumsByNumber = map[int32]string + +var enumCache sync.Map // map[enumName]enumsByName +var numFilesCache sync.Map // map[protoreflect.FullName]int + +// RegisterEnum is called from the generated code to register the mapping of +// enum value names to enum numbers for the enum identified by s. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead. 
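RegisterFile above receives the gzip-compressed wire form of a FileDescriptorProto and decompresses it before handing the raw bytes to the v2 descriptor builder. A minimal sketch of just the compression round trip it assumes; the payload here is a stand-in, not a real descriptor:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
)

func main() {
	raw := []byte("stand-in for a serialized FileDescriptorProto")

	// Generated code historically embedded descriptors in this compressed form.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write(raw)
	zw.Close()

	// Mirror of what RegisterFile does before calling protoimpl.DescBuilder.
	zr, err := gzip.NewReader(bytes.NewReader(buf.Bytes()))
	if err != nil {
		panic(err)
	}
	got, _ := ioutil.ReadAll(zr)
	fmt.Println(bytes.Equal(got, raw)) // true
}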
+func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) { + if _, ok := enumCache.Load(s); ok { + panic("proto: duplicate enum registered: " + s) + } + enumCache.Store(s, m) + + // This does not forward registration to the v2 registry since this API + // lacks sufficient information to construct a complete v2 enum descriptor. +} + +// EnumValueMap returns the mapping from enum value names to enum numbers for +// the enum of the given name. It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead. +func EnumValueMap(s enumName) enumsByName { + if v, ok := enumCache.Load(s); ok { + return v.(enumsByName) + } + + // Check whether the cache is stale. If the number of files in the current + // package differs, then it means that some enums may have been recently + // registered upstream that we do not know about. + var protoPkg protoreflect.FullName + if i := strings.LastIndexByte(s, '.'); i >= 0 { + protoPkg = protoreflect.FullName(s[:i]) + } + v, _ := numFilesCache.Load(protoPkg) + numFiles, _ := v.(int) + if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles { + return nil // cache is up-to-date; was not found earlier + } + + // Update the enum cache for all enums declared in the given proto package. + numFiles = 0 + protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool { + walkEnums(fd, func(ed protoreflect.EnumDescriptor) { + name := protoimpl.X.LegacyEnumName(ed) + if _, ok := enumCache.Load(name); !ok { + m := make(enumsByName) + evs := ed.Values() + for i := evs.Len() - 1; i >= 0; i-- { + ev := evs.Get(i) + m[string(ev.Name())] = int32(ev.Number()) + } + enumCache.LoadOrStore(name, m) + } + }) + numFiles++ + return true + }) + numFilesCache.Store(protoPkg, numFiles) + + // Check cache again for enum map. + if v, ok := enumCache.Load(s); ok { + return v.(enumsByName) + } + return nil +} + +// walkEnums recursively walks all enums declared in d. +func walkEnums(d interface { + Enums() protoreflect.EnumDescriptors + Messages() protoreflect.MessageDescriptors +}, f func(protoreflect.EnumDescriptor)) { + eds := d.Enums() + for i := eds.Len() - 1; i >= 0; i-- { + f(eds.Get(i)) + } + mds := d.Messages() + for i := mds.Len() - 1; i >= 0; i-- { + walkEnums(mds.Get(i), f) + } +} + +// messageName is the full name of protobuf message. +type messageName = string + +var messageTypeCache sync.Map // map[messageName]reflect.Type + +// RegisterType is called from generated code to register the message Go type +// for a message of the given name. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead. +func RegisterType(m Message, s messageName) { + mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s)) + if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil { + panic(err) + } + messageTypeCache.Store(s, reflect.TypeOf(m)) +} + +// RegisterMapType is called from generated code to register the Go map type +// for a protobuf message representing a map entry. +// +// Deprecated: Do not use. +func RegisterMapType(m interface{}, s messageName) { + t := reflect.TypeOf(m) + if t.Kind() != reflect.Map { + panic(fmt.Sprintf("invalid map kind: %v", t)) + } + if _, ok := messageTypeCache.Load(s); ok { + panic(fmt.Errorf("proto: duplicate proto message registered: %s", s)) + } + messageTypeCache.Store(s, t) +} + +// MessageType returns the message type for a named message. +// It returns nil if not found. 
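RegisterEnum and EnumValueMap keep the old name-to-number lookup working on top of a local cache. A usage sketch with a hypothetical enum name of the form generated code uses (proto package plus Go enum type name):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// The by-number map is accepted for compatibility but ignored by the new code.
	proto.RegisterEnum("example.Weather",
		map[int32]string{0: "SUNNY", 1: "RAINY"},
		map[string]int32{"SUNNY": 0, "RAINY": 1})

	values := proto.EnumValueMap("example.Weather")
	fmt.Println(values["RAINY"]) // 1
}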
+// +// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead. +func MessageType(s messageName) reflect.Type { + if v, ok := messageTypeCache.Load(s); ok { + return v.(reflect.Type) + } + + // Derive the message type from the v2 registry. + var t reflect.Type + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil { + t = messageGoType(mt) + } + + // If we could not get a concrete type, it is possible that it is a + // pseudo-message for a map entry. + if t == nil { + d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s)) + if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() { + kt := goTypeForField(md.Fields().ByNumber(1)) + vt := goTypeForField(md.Fields().ByNumber(2)) + t = reflect.MapOf(kt, vt) + } + } + + // Locally cache the message type for the given name. + if t != nil { + v, _ := messageTypeCache.LoadOrStore(s, t) + return v.(reflect.Type) + } + return nil +} + +func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type { + switch k := fd.Kind(); k { + case protoreflect.EnumKind: + if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil { + return enumGoType(et) + } + return reflect.TypeOf(protoreflect.EnumNumber(0)) + case protoreflect.MessageKind, protoreflect.GroupKind: + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil { + return messageGoType(mt) + } + return reflect.TypeOf((*protoreflect.Message)(nil)).Elem() + default: + return reflect.TypeOf(fd.Default().Interface()) + } +} + +func enumGoType(et protoreflect.EnumType) reflect.Type { + return reflect.TypeOf(et.New(0)) +} + +func messageGoType(mt protoreflect.MessageType) reflect.Type { + return reflect.TypeOf(MessageV1(mt.Zero().Interface())) +} + +// MessageName returns the full protobuf name for the given message type. +// +// Deprecated: Use protoreflect.MessageDescriptor.FullName instead. +func MessageName(m Message) messageName { + if m == nil { + return "" + } + if m, ok := m.(interface{ XXX_MessageName() messageName }); ok { + return m.XXX_MessageName() + } + return messageName(protoimpl.X.MessageDescriptorOf(m).FullName()) +} + +// RegisterExtension is called from the generated code to register +// the extension descriptor. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead. +func RegisterExtension(d *ExtensionDesc) { + if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil { + panic(err) + } +} + +type extensionsByNumber = map[int32]*ExtensionDesc + +var extensionCache sync.Map // map[messageName]extensionsByNumber + +// RegisteredExtensions returns a map of the registered extensions for the +// provided protobuf message, indexed by the extension field number. +// +// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead. +func RegisteredExtensions(m Message) extensionsByNumber { + // Check whether the cache is stale. If the number of extensions for + // the given message differs, then it means that some extensions were + // recently registered upstream that we do not know about. + s := MessageName(m) + v, _ := extensionCache.Load(s) + xs, _ := v.(extensionsByNumber) + if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) { + return xs // cache is up-to-date + } + + // Cache is stale, re-compute the extensions map. 
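MessageName and MessageType round-trip between a message value and its fully-qualified protobuf name, falling back to the v2 global registry when the local caches miss. A sketch using the well-known Empty message as the example:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	name := proto.MessageName(&emptypb.Empty{})
	fmt.Println(name) // google.protobuf.Empty

	// The reverse lookup resolves through the v2 global registry.
	fmt.Println(proto.MessageType(name)) // *emptypb.Empty
}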
+ xs = make(extensionsByNumber) + protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool { + if xd, ok := xt.(*ExtensionDesc); ok { + xs[int32(xt.TypeDescriptor().Number())] = xd + } else { + // TODO: This implies that the protoreflect.ExtensionType is a + // custom type not generated by protoc-gen-go. We could try and + // convert the type to an ExtensionDesc. + } + return true + }) + extensionCache.Store(s, xs) + return xs +} diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go deleted file mode 100644 index 0f212b302..000000000 --- a/vendor/github.com/golang/protobuf/proto/table_marshal.go +++ /dev/null @@ -1,2681 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// a sizer takes a pointer to a field and the size of its tag, computes the size of -// the encoded data. -type sizer func(pointer, int) int - -// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), -// marshals the field to the end of the slice, returns the slice and error (if any). -type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) - -// marshalInfo is the information used for marshaling a message. 
-type marshalInfo struct { - typ reflect.Type - fields []*marshalFieldInfo - unrecognized field // offset of XXX_unrecognized - extensions field // offset of XXX_InternalExtensions - v1extensions field // offset of XXX_extensions - sizecache field // offset of XXX_sizecache - initialized int32 // 0 -- only typ is set, 1 -- fully initialized - messageset bool // uses message set wire format - hasmarshaler bool // has custom marshaler - sync.RWMutex // protect extElems map, also for initialization - extElems map[int32]*marshalElemInfo // info of extension elements -} - -// marshalFieldInfo is the information used for marshaling a field of a message. -type marshalFieldInfo struct { - field field - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isPointer bool - required bool // field is required - name string // name of the field, for error reporting - oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements -} - -// marshalElemInfo is the information used for marshaling an extension or oneof element. -type marshalElemInfo struct { - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) -} - -var ( - marshalInfoMap = map[reflect.Type]*marshalInfo{} - marshalInfoLock sync.Mutex -) - -// getMarshalInfo returns the information to marshal a given type of message. -// The info it returns may not necessarily initialized. -// t is the type of the message (NOT the pointer to it). -func getMarshalInfo(t reflect.Type) *marshalInfo { - marshalInfoLock.Lock() - u, ok := marshalInfoMap[t] - if !ok { - u = &marshalInfo{typ: t} - marshalInfoMap[t] = u - } - marshalInfoLock.Unlock() - return u -} - -// Size is the entry point from generated code, -// and should be ONLY called by generated code. -// It computes the size of encoded data of msg. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Size(msg Message) int { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return 0 - } - return u.size(ptr) -} - -// Marshal is the entry point from generated code, -// and should be ONLY called by generated code. -// It marshals msg to the end of b. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return b, ErrNil - } - return u.marshal(b, ptr, deterministic) -} - -func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { - // u := a.marshal, but atomically. - // We use an atomic here to ensure memory consistency. - u := atomicLoadMarshalInfo(&a.marshal) - if u == nil { - // Get marshal information from type of message. - t := reflect.ValueOf(msg).Type() - if t.Kind() != reflect.Ptr { - panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) - } - u = getMarshalInfo(t.Elem()) - // Store it in the cache for later users. 
- // a.marshal = u, but atomically. - atomicStoreMarshalInfo(&a.marshal, u) - } - return u -} - -// size is the main function to compute the size of the encoded data of a message. -// ptr is the pointer to the message. -func (u *marshalInfo) size(ptr pointer) int { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b, _ := m.Marshal() - return len(b) - } - - n := 0 - for _, f := range u.fields { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - n += f.sizer(ptr.offset(f.field), f.tagsize) - } - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - n += u.sizeMessageSet(e) - } else { - n += u.sizeExtensions(e) - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - n += u.sizeV1Extensions(m) - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - n += len(s) - } - // cache the result for use in marshal - if u.sizecache.IsValid() { - atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) - } - return n -} - -// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), -// fall back to compute the size. -func (u *marshalInfo) cachedsize(ptr pointer) int { - if u.sizecache.IsValid() { - return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) - } - return u.size(ptr) -} - -// marshal is the main function to marshal a message. It takes a byte slice and appends -// the encoded data to the end of the slice, returns the slice and error (if any). -// ptr is the pointer to the message. -// If deterministic is true, map is marshaled in deterministic order. -func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b1, err := m.Marshal() - b = append(b, b1...) - return b, err - } - - var err, errreq error - // The old marshaler encodes extensions at beginning. - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - b, err = u.appendMessageSet(b, e, deterministic) - } else { - b, err = u.appendExtensions(b, e, deterministic) - } - if err != nil { - return b, err - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - b, err = u.appendV1Extensions(b, m, deterministic) - if err != nil { - return b, err - } - } - for _, f := range u.fields { - if f.required && errreq == nil { - if ptr.offset(f.field).getPointer().isNil() { - // Required field is not set. - // We record the error but keep going, to give a complete marshaling. - errreq = &RequiredNotSetError{f.name} - continue - } - } - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) - if err != nil { - if err1, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. 
- if errreq == nil { - errreq = &RequiredNotSetError{f.name + "." + err1.field} - } - continue - } - if err == errRepeatedHasNil { - err = errors.New("proto: repeated field " + f.name + " has nil element") - } - return b, err - } - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - b = append(b, s...) - } - return b, errreq -} - -// computeMarshalInfo initializes the marshal info. -func (u *marshalInfo) computeMarshalInfo() { - u.Lock() - defer u.Unlock() - if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock - return - } - - t := u.typ - u.unrecognized = invalidField - u.extensions = invalidField - u.v1extensions = invalidField - u.sizecache = invalidField - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if reflect.PtrTo(t).Implements(marshalerType) { - u.hasmarshaler = true - atomic.StoreInt32(&u.initialized, 1) - return - } - - // get oneof implementers - var oneofImplementers []interface{} - if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - } - - n := t.NumField() - - // deal with XXX fields first - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if !strings.HasPrefix(f.Name, "XXX_") { - continue - } - switch f.Name { - case "XXX_sizecache": - u.sizecache = toField(&f) - case "XXX_unrecognized": - u.unrecognized = toField(&f) - case "XXX_InternalExtensions": - u.extensions = toField(&f) - u.messageset = f.Tag.Get("protobuf_messageset") == "1" - case "XXX_extensions": - u.v1extensions = toField(&f) - case "XXX_NoUnkeyedLiteral": - // nothing to do - default: - panic("unknown XXX field: " + f.Name) - } - n-- - } - - // normal fields - fields := make([]marshalFieldInfo, n) // batch allocation - u.fields = make([]*marshalFieldInfo, 0, n) - for i, j := 0, 0; i < t.NumField(); i++ { - f := t.Field(i) - - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - field := &fields[j] - j++ - field.name = f.Name - u.fields = append(u.fields, field) - if f.Tag.Get("protobuf_oneof") != "" { - field.computeOneofFieldInfo(&f, oneofImplementers) - continue - } - if f.Tag.Get("protobuf") == "" { - // field has no tag (not in generated message), ignore it - u.fields = u.fields[:len(u.fields)-1] - j-- - continue - } - field.computeMarshalFieldInfo(&f) - } - - // fields are marshaled in tag order on the wire. - sort.Sort(byTag(u.fields)) - - atomic.StoreInt32(&u.initialized, 1) -} - -// helper for sorting fields by tag -type byTag []*marshalFieldInfo - -func (a byTag) Len() int { return len(a) } -func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } - -// getExtElemInfo returns the information to marshal an extension element. -// The info it returns is initialized. 
-func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { - // get from cache first - u.RLock() - e, ok := u.extElems[desc.Field] - u.RUnlock() - if ok { - return e - } - - t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct - tags := strings.Split(desc.Tag, ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizer, marshaler := typeMarshaler(t, tags, false, false) - e = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizer, - marshaler: marshaler, - isptr: t.Kind() == reflect.Ptr, - } - - // update cache - u.Lock() - if u.extElems == nil { - u.extElems = make(map[int32]*marshalElemInfo) - } - u.extElems[desc.Field] = e - u.Unlock() - return e -} - -// computeMarshalFieldInfo fills up the information to marshal a field. -func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { - // parse protobuf tag of the field. - // tag has format of "bytes,49,opt,name=foo,def=hello!" - tags := strings.Split(f.Tag.Get("protobuf"), ",") - if tags[0] == "" { - return - } - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if tags[2] == "req" { - fi.required = true - } - fi.setTag(f, tag, wt) - fi.setMarshaler(f, tags) -} - -func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { - fi.field = toField(f) - fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. - fi.isPointer = true - fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) - fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) - - ityp := f.Type // interface type - for _, o := range oneofImplementers { - t := reflect.TypeOf(o) - if !t.Implements(ityp) { - continue - } - sf := t.Elem().Field(0) // oneof implementer is a struct with a single field - tags := strings.Split(sf.Tag.Get("protobuf"), ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value - fi.oneofElems[t.Elem()] = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizer, - marshaler: marshaler, - } - } -} - -type oneofMessage interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) -} - -// wiretype returns the wire encoding of the type. -func wiretype(encoding string) uint64 { - switch encoding { - case "fixed32": - return WireFixed32 - case "fixed64": - return WireFixed64 - case "varint", "zigzag32", "zigzag64": - return WireVarint - case "bytes": - return WireBytes - case "group": - return WireStartGroup - } - panic("unknown wire type " + encoding) -} - -// setTag fills up the tag (in wire format) and its size in the info of a field. -func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { - fi.field = toField(f) - fi.wiretag = uint64(tag)<<3 | wt - fi.tagsize = SizeVarint(uint64(tag) << 3) -} - -// setMarshaler fills up the sizer and marshaler in the info of a field. 
-func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { - switch f.Type.Kind() { - case reflect.Map: - // map field - fi.isPointer = true - fi.sizer, fi.marshaler = makeMapMarshaler(f) - return - case reflect.Ptr, reflect.Slice: - fi.isPointer = true - } - fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) -} - -// typeMarshaler returns the sizer and marshaler of a given field. -// t is the type of the field. -// tags is the generated "protobuf" tag of the field. -// If nozero is true, zero value is not marshaled to the wire. -// If oneof is true, it is a oneof field. -func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { - encoding := tags[0] - - pointer := false - slice := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - packed := false - proto3 := false - for i := 2; i < len(tags); i++ { - if tags[i] == "packed" { - packed = true - } - if tags[i] == "proto3" { - proto3 = true - } - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return sizeBoolPtr, appendBoolPtr - } - if slice { - if packed { - return sizeBoolPackedSlice, appendBoolPackedSlice - } - return sizeBoolSlice, appendBoolSlice - } - if nozero { - return sizeBoolValueNoZero, appendBoolValueNoZero - } - return sizeBoolValue, appendBoolValue - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixed32Ptr, appendFixed32Ptr - } - if slice { - if packed { - return sizeFixed32PackedSlice, appendFixed32PackedSlice - } - return sizeFixed32Slice, appendFixed32Slice - } - if nozero { - return sizeFixed32ValueNoZero, appendFixed32ValueNoZero - } - return sizeFixed32Value, appendFixed32Value - case "varint": - if pointer { - return sizeVarint32Ptr, appendVarint32Ptr - } - if slice { - if packed { - return sizeVarint32PackedSlice, appendVarint32PackedSlice - } - return sizeVarint32Slice, appendVarint32Slice - } - if nozero { - return sizeVarint32ValueNoZero, appendVarint32ValueNoZero - } - return sizeVarint32Value, appendVarint32Value - } - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixedS32Ptr, appendFixedS32Ptr - } - if slice { - if packed { - return sizeFixedS32PackedSlice, appendFixedS32PackedSlice - } - return sizeFixedS32Slice, appendFixedS32Slice - } - if nozero { - return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero - } - return sizeFixedS32Value, appendFixedS32Value - case "varint": - if pointer { - return sizeVarintS32Ptr, appendVarintS32Ptr - } - if slice { - if packed { - return sizeVarintS32PackedSlice, appendVarintS32PackedSlice - } - return sizeVarintS32Slice, appendVarintS32Slice - } - if nozero { - return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero - } - return sizeVarintS32Value, appendVarintS32Value - case "zigzag32": - if pointer { - return sizeZigzag32Ptr, appendZigzag32Ptr - } - if slice { - if packed { - return sizeZigzag32PackedSlice, appendZigzag32PackedSlice - } - return sizeZigzag32Slice, appendZigzag32Slice - } - if nozero { - return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero - } - return sizeZigzag32Value, appendZigzag32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixed64Ptr, appendFixed64Ptr - } - if slice { - if packed { - return sizeFixed64PackedSlice, appendFixed64PackedSlice - } - return sizeFixed64Slice, appendFixed64Slice - } - if 
nozero { - return sizeFixed64ValueNoZero, appendFixed64ValueNoZero - } - return sizeFixed64Value, appendFixed64Value - case "varint": - if pointer { - return sizeVarint64Ptr, appendVarint64Ptr - } - if slice { - if packed { - return sizeVarint64PackedSlice, appendVarint64PackedSlice - } - return sizeVarint64Slice, appendVarint64Slice - } - if nozero { - return sizeVarint64ValueNoZero, appendVarint64ValueNoZero - } - return sizeVarint64Value, appendVarint64Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixedS64Ptr, appendFixedS64Ptr - } - if slice { - if packed { - return sizeFixedS64PackedSlice, appendFixedS64PackedSlice - } - return sizeFixedS64Slice, appendFixedS64Slice - } - if nozero { - return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero - } - return sizeFixedS64Value, appendFixedS64Value - case "varint": - if pointer { - return sizeVarintS64Ptr, appendVarintS64Ptr - } - if slice { - if packed { - return sizeVarintS64PackedSlice, appendVarintS64PackedSlice - } - return sizeVarintS64Slice, appendVarintS64Slice - } - if nozero { - return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero - } - return sizeVarintS64Value, appendVarintS64Value - case "zigzag64": - if pointer { - return sizeZigzag64Ptr, appendZigzag64Ptr - } - if slice { - if packed { - return sizeZigzag64PackedSlice, appendZigzag64PackedSlice - } - return sizeZigzag64Slice, appendZigzag64Slice - } - if nozero { - return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero - } - return sizeZigzag64Value, appendZigzag64Value - } - case reflect.Float32: - if pointer { - return sizeFloat32Ptr, appendFloat32Ptr - } - if slice { - if packed { - return sizeFloat32PackedSlice, appendFloat32PackedSlice - } - return sizeFloat32Slice, appendFloat32Slice - } - if nozero { - return sizeFloat32ValueNoZero, appendFloat32ValueNoZero - } - return sizeFloat32Value, appendFloat32Value - case reflect.Float64: - if pointer { - return sizeFloat64Ptr, appendFloat64Ptr - } - if slice { - if packed { - return sizeFloat64PackedSlice, appendFloat64PackedSlice - } - return sizeFloat64Slice, appendFloat64Slice - } - if nozero { - return sizeFloat64ValueNoZero, appendFloat64ValueNoZero - } - return sizeFloat64Value, appendFloat64Value - case reflect.String: - if pointer { - return sizeStringPtr, appendStringPtr - } - if slice { - return sizeStringSlice, appendStringSlice - } - if nozero { - return sizeStringValueNoZero, appendStringValueNoZero - } - return sizeStringValue, appendStringValue - case reflect.Slice: - if slice { - return sizeBytesSlice, appendBytesSlice - } - if oneof { - // Oneof bytes field may also have "proto3" tag. - // We want to marshal it as a oneof field. Do this - // check before the proto3 check. - return sizeBytesOneof, appendBytesOneof - } - if proto3 { - return sizeBytes3, appendBytes3 - } - return sizeBytes, appendBytes - case reflect.Struct: - switch encoding { - case "group": - if slice { - return makeGroupSliceMarshaler(getMarshalInfo(t)) - } - return makeGroupMarshaler(getMarshalInfo(t)) - case "bytes": - if slice { - return makeMessageSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageMarshaler(getMarshalInfo(t)) - } - } - panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) -} - -// Below are functions to size/marshal a specific type of a field. -// They are stored in the field's info, and called by function pointers. -// They have type sizer or marshaler. 
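An illustrative aside (not part of the diff): the table built above stores one sizer and one marshaler function pointer per field, chosen once by typeMarshaler, so the per-message hot path avoids repeated type switches. The minimal sketch below mimics that dispatch with invented names (toyFieldInfo, toySizer, toyMarshaler); it is not the library's API.

package main

import "fmt"

// Toy counterparts of the sizer/marshaler function-pointer pair described above.
type toySizer func(v uint64, tagsize int) int
type toyMarshaler func(b []byte, v uint64, wiretag uint64) []byte

type toyFieldInfo struct {
	wiretag uint64
	tagsize int
	sizer   toySizer
	marshal toyMarshaler
}

// sizeVarint and appendVarint follow the standard base-128 varint rules.
func sizeVarint(v uint64) int {
	n := 1
	for v >= 1<<7 {
		v >>= 7
		n++
	}
	return n
}

func appendVarint(b []byte, v uint64) []byte {
	for v >= 1<<7 {
		b = append(b, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(b, byte(v))
}

func main() {
	// Hypothetical field: number 1, wire type 0 (varint), so wiretag = 1<<3 | 0.
	fi := toyFieldInfo{
		wiretag: 1<<3 | 0,
		tagsize: 1,
		sizer:   func(v uint64, tagsize int) int { return sizeVarint(v) + tagsize },
		marshal: func(b []byte, v uint64, wiretag uint64) []byte {
			b = appendVarint(b, wiretag)
			return appendVarint(b, v)
		},
	}
	fmt.Println("size:", fi.sizer(300, fi.tagsize))              // 3 (1 tag byte + 2 value bytes)
	fmt.Printf("bytes: % x\n", fi.marshal(nil, 300, fi.wiretag)) // 08 ac 02
}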
- -func sizeFixed32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixedS32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFloat32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - return (4 + tagsize) * len(s) -} -func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixed64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFixedS64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFloat64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Ptr(ptr pointer, 
tagsize int) int { - p := *ptr.toFloat64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - return (8 + tagsize) * len(s) -} -func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeVarint32Value(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarint32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarint64Value(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - return SizeVarint(v) + tagsize -} -func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return SizeVarint(v) + tagsize -} -func sizeVarint64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return SizeVarint(*p) + tagsize -} -func sizeVarint64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(v) + tagsize - } - return n -} -func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func 
sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize - } - return n -} -func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize - } - return n -} -func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeBoolValue(_ pointer, tagsize int) int { - return 1 + tagsize -} -func sizeBoolValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toBool() - if !v { - return 0 - } - return 1 + tagsize -} -func sizeBoolPtr(ptr pointer, tagsize int) int { - p := *ptr.toBoolPtr() - if p == nil { - return 0 - } - return 1 + tagsize -} -func sizeBoolSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - return (1 + tagsize) * len(s) -} -func sizeBoolPackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return 0 - } - return len(s) + SizeVarint(uint64(len(s))) + tagsize -} -func sizeStringValue(ptr pointer, tagsize int) int { - v := *ptr.toString() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toString() - if v == "" { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringPtr(ptr pointer, tagsize int) int { - p := *ptr.toStringPtr() - if p == nil { - return 0 - } - v := *p - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringSlice(ptr pointer, tagsize int) int { - s := *ptr.toStringSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} -func 
sizeBytes(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if v == nil { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytes3(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if len(v) == 0 { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesOneof(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesSlice(ptr pointer, tagsize int) int { - s := *ptr.toBytesSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} - -// appendFixed32 appends an encoded fixed32 to b. -func appendFixed32(b []byte, v uint32) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24)) - return b -} - -// appendFixed64 appends an encoded fixed64 to b. -func appendFixed64(b []byte, v uint64) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24), - byte(v>>32), - byte(v>>40), - byte(v>>48), - byte(v>>56)) - return b -} - -// appendVarint appends an encoded varint to b. -func appendVarint(b []byte, v uint64) []byte { - // TODO: make 1-byte (maybe 2-byte) case inline-able, once we - // have non-leaf inliner. - switch { - case v < 1<<7: - b = append(b, byte(v)) - case v < 1<<14: - b = append(b, - byte(v&0x7f|0x80), - byte(v>>7)) - case v < 1<<21: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte(v>>14)) - case v < 1<<28: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte(v>>21)) - case v < 1<<35: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte(v>>28)) - case v < 1<<42: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte(v>>35)) - case v < 1<<49: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte(v>>42)) - case v < 1<<56: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte(v>>49)) - case v < 1<<63: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte(v>>56)) - default: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte((v>>56)&0x7f|0x80), - 1) - } - return b -} - -func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, *p) - return b, nil -} 
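Another illustrative aside (not part of the diff): the sizeZigzag*/appendZigzag* helpers nearby all use the same zigzag mapping, (v<<1) ^ (v>>63) for 64-bit values, so that small negative numbers stay small on the wire. The self-contained sketch below applies that exact expression and its inverse; the helper names are local to this snippet.

package main

import "fmt"

// zigzag64 matches the expression uint64(v<<1) ^ uint64(v>>63) used by the
// sizeZigzag64* and appendZigzag64* helpers in the deleted file.
func zigzag64(v int64) uint64 {
	return uint64(v<<1) ^ uint64(v>>63)
}

// unzigzag64 is the inverse mapping a decoder would apply.
func unzigzag64(u uint64) int64 {
	return int64(u>>1) ^ -int64(u&1)
}

func main() {
	for _, v := range []int64{0, -1, 1, -2, 2, -123456789} {
		u := zigzag64(v)
		fmt.Printf("%11d -> %10d -> %11d\n", v, u, unzigzag64(u))
	}
	// 0, -1, 1, -2, 2 map to 0, 1, 2, 3, 4: magnitudes stay small on either side of zero.
}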
-func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(*p)) - return b, nil -} -func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(*p)) - return b, nil -} -func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) 
{ - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, *p) - return b, nil -} -func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(*p)) - return b, nil -} -func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(*p)) - return b, nil -} -func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = 
appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, *p) - return b, nil -} -func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - } - return b, nil -} -func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, v) - } - return b, nil -} -func appendVarintS64Value(b []byte, ptr 
pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func 
appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - if !v { - return b, nil - } - b = appendVarint(b, wiretag) - b = append(b, 1) - return b, nil -} - -func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toBoolPtr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - if *p { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(len(s))) - for _, v := range s { - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - if !utf8.ValidString(v) { - return nil, errInvalidUTF8 - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - if v == "" { - return b, nil - } - if !utf8.ValidString(v) { - return nil, errInvalidUTF8 - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - if !utf8.ValidString(v) { - return nil, errInvalidUTF8 - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toStringSlice() - for _, v := range s { - if !utf8.ValidString(v) { - return nil, errInvalidUTF8 - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} -func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if v == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) 
- return b, nil -} -func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if len(v) == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBytesSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} - -// makeGroupMarshaler returns the sizer and marshaler for a group. -// u is the marshal info of the underlying message. -func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - return u.size(p) + 2*tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - var err error - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, p, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - return b, err - } -} - -// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. -// u is the marshal info of the underlying message. -func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - n += u.size(v) + 2*tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err, errreq error - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, v, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - if err != nil { - if _, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. - if errreq == nil { - errreq = err - } - continue - } - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, errreq - } -} - -// makeMessageMarshaler returns the sizer and marshaler for a message field. -// u is the marshal info of the message. -func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.size(p) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(p) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, p, deterministic) - } -} - -// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. -// u is the marshal info of the message. 
-func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err, errreq error - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if err != nil { - if _, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. - if errreq == nil { - errreq = err - } - continue - } - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, errreq - } -} - -// makeMapMarshaler returns the sizer and marshaler for a map field. -// f is the pointer to the reflect data structure of the field. -func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { - // figure out key and value type - t := f.Type - keyType := t.Key() - valType := t.Elem() - keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map - valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map - keyWireTag := 1<<3 | wiretype(keyTags[0]) - valWireTag := 2<<3 | wiretype(valTags[0]) - - // We create an interface to get the addresses of the map key and value. - // If value is pointer-typed, the interface is a direct interface, the - // idata itself is the value. Otherwise, the idata is the pointer to the - // value. - // Key cannot be pointer-typed. - valIsPtr := valType.Kind() == reflect.Ptr - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(t).Elem() // the map - n := 0 - for _, k := range m.MapKeys() { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value - siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(t).Elem() // the map - var err error - keys := m.MapKeys() - if len(keys) > 1 && deterministic { - sort.Sort(mapKeys(keys)) - } - for _, k := range keys { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value - b = appendVarint(b, tag) - siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - b = appendVarint(b, uint64(siz)) - b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) - if err != nil { - return b, err - } - b, err = valMarshaler(b, vaddr, valWireTag, deterministic) - if err != nil && err != ErrNil { // allow nil value in map - return b, err - } - } - return b, nil - } -} - -// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. -// fi is the marshal info of the field. -// f is the pointer to the reflect data structure of the field. 
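One more illustrative aside (not part of the diff): makeMapMarshaler above frames every map entry as a tiny embedded message whose key is field 1 and whose value is field 2, sorting keys when deterministic output is requested. The sketch below hand-encodes a single map[string]int32 entry the same way; encodeMapEntry and its helper are invented for this example.

package main

import "fmt"

func appendVarint(b []byte, v uint64) []byte {
	for v >= 1<<7 {
		b = append(b, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(b, byte(v))
}

// encodeMapEntry builds the wire form of one implicit map-entry message:
// field 1 = key (length-delimited), field 2 = value (varint), matching the
// keyWireTag/valWireTag constants computed by makeMapMarshaler.
func encodeMapEntry(k string, v int32) []byte {
	var entry []byte
	entry = appendVarint(entry, 1<<3|2) // key tag: field 1, wire type 2
	entry = appendVarint(entry, uint64(len(k)))
	entry = append(entry, k...)
	entry = appendVarint(entry, 2<<3|0) // value tag: field 2, wire type 0
	entry = appendVarint(entry, uint64(v))
	return entry
}

func main() {
	entry := encodeMapEntry("id", 7)
	// The map field itself is then emitted once per entry as
	// <map field tag><len(entry)><entry>, which is what the closure above does.
	fmt.Printf("% x\n", entry) // 0a 02 69 64 10 07
}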
-func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { - // Oneof field is an interface. We need to get the actual data type on the fly. - t := f.Type - return func(ptr pointer, _ int) int { - p := ptr.getInterfacePointer() - if p.isNil() { - return 0 - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - e := fi.oneofElems[telem] - return e.sizer(p, e.tagsize) - }, - func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { - p := ptr.getInterfacePointer() - if p.isNil() { - return b, nil - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { - return b, errOneofHasNil - } - e := fi.oneofElems[telem] - return e.marshaler(b, p, e.wiretag, deterministic) - } -} - -// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. -func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, ei.tagsize) - } - mu.Unlock() - return n -} - -// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. -func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if err != nil { - return b, err - } - } - return b, nil - } - - // Sort the keys to provide a deterministic encoding. - // Not sure this is required, but the old code does it. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. 
- - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if err != nil { - return b, err - } - } - return b, nil -} - -// message set format is: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } - -// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field -// in message set format (above). -func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for id, e := range m { - n += 2 // start group, end group. tag = 1 (size=1) - n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - siz := len(msgWithLen) - n += siz + 1 // message, tag = 3 (size=1) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, 1) // message, tag = 3 (size=1) - } - mu.Unlock() - return n -} - -// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) -// to the end of byte slice b. -func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for id, e := range m { - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - if err != nil { - return b, err - } - b = append(b, 1<<3|WireEndGroup) - } - return b, nil - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, id := range keys { - e := m[int32(id)] - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. 
- - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - b = append(b, 1<<3|WireEndGroup) - if err != nil { - return b, err - } - } - return b, nil -} - -// sizeV1Extensions computes the size of encoded data for a V1-API extension field. -func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { - if m == nil { - return 0 - } - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, ei.tagsize) - } - return n -} - -// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. -func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { - if m == nil { - return b, nil - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - var err error - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if err != nil { - return b, err - } - } - return b, nil -} - -// newMarshaler is the interface representing objects that can marshal themselves. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newMarshaler interface { - XXX_Size() int - XXX_Marshal(b []byte, deterministic bool) ([]byte, error) -} - -// Size returns the encoded size of a protocol buffer message. -// This is the main entry point. -func Size(pb Message) int { - if m, ok := pb.(newMarshaler); ok { - return m.XXX_Size() - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, _ := m.Marshal() - return len(b) - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return 0 - } - var info InternalMessageInfo - return info.Size(pb) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, returning the data. -// This is the main entry point. -func Marshal(pb Message) ([]byte, error) { - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - b := make([]byte, 0, siz) - return m.XXX_Marshal(b, false) - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - return m.Marshal() - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return nil, ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - b := make([]byte, 0, siz) - return info.Marshal(b, pb, false) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, writing the result to the -// Buffer. 
-// This is an alternative entry point. It is not necessary to use -// a Buffer for most applications. -func (p *Buffer) Marshal(pb Message) error { - var err error - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - p.grow(siz) // make sure buf has enough capacity - p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) - return err - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, err := m.Marshal() - p.buf = append(p.buf, b...) - return err - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - p.grow(siz) // make sure buf has enough capacity - p.buf, err = info.Marshal(p.buf, pb, p.deterministic) - return err -} - -// grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After grow(n), at least n bytes can be written to the -// buffer without another allocation. -func (p *Buffer) grow(n int) { - need := len(p.buf) + n - if need <= cap(p.buf) { - return - } - newCap := len(p.buf) * 2 - if newCap < need { - newCap = need - } - p.buf = append(make([]byte, 0, newCap), p.buf...) -} diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go deleted file mode 100644 index 5525def6a..000000000 --- a/vendor/github.com/golang/protobuf/proto/table_merge.go +++ /dev/null @@ -1,654 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -// Merge merges the src message into dst. -// This assumes that dst and src of the same type and are non-nil. 
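A final illustrative aside (not part of the diff): the mergeInfo machinery below applies two simple per-field rules, scalar fields are copied only when the source value is non-zero and repeated fields are appended, as visible in the int32/int64 and slice cases of computeMergeInfo. The toy merge below, with a hypothetical struct, mirrors those rules without any reflection.

package main

import "fmt"

// toyMsg stands in for a generated message; the type is invented for illustration.
type toyMsg struct {
	Count int64
	Tags  []string
}

// mergeToy copies non-zero scalars and appends repeated values from src into dst,
// the same behaviour the per-kind merge funcs below implement via field offsets.
func mergeToy(dst, src *toyMsg) {
	if src.Count != 0 {
		dst.Count = src.Count
	}
	if len(src.Tags) > 0 {
		dst.Tags = append(dst.Tags, src.Tags...)
	}
}

func main() {
	dst := &toyMsg{Count: 1, Tags: []string{"a"}}
	src := &toyMsg{Count: 0, Tags: []string{"b", "c"}}
	mergeToy(dst, src)
	fmt.Println(dst.Count, dst.Tags) // 1 [a b c]: zero Count leaves dst untouched, Tags append
}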
-func (a *InternalMessageInfo) Merge(dst, src Message) { - mi := atomicLoadMergeInfo(&a.merge) - if mi == nil { - mi = getMergeInfo(reflect.TypeOf(dst).Elem()) - atomicStoreMergeInfo(&a.merge, mi) - } - mi.merge(toPointer(&dst), toPointer(&src)) -} - -type mergeInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []mergeFieldInfo - unrecognized field // Offset of XXX_unrecognized -} - -type mergeFieldInfo struct { - field field // Offset of field, guaranteed to be valid - - // isPointer reports whether the value in the field is a pointer. - // This is true for the following situations: - // * Pointer to struct - // * Pointer to basic type (proto2 only) - // * Slice (first value in slice header is a pointer) - // * String (first value in string header is a pointer) - isPointer bool - - // basicWidth reports the width of the field assuming that it is directly - // embedded in the struct (as is the case for basic types in proto3). - // The possible values are: - // 0: invalid - // 1: bool - // 4: int32, uint32, float32 - // 8: int64, uint64, float64 - basicWidth int - - // Where dst and src are pointers to the types being merged. - merge func(dst, src pointer) -} - -var ( - mergeInfoMap = map[reflect.Type]*mergeInfo{} - mergeInfoLock sync.Mutex -) - -func getMergeInfo(t reflect.Type) *mergeInfo { - mergeInfoLock.Lock() - defer mergeInfoLock.Unlock() - mi := mergeInfoMap[t] - if mi == nil { - mi = &mergeInfo{typ: t} - mergeInfoMap[t] = mi - } - return mi -} - -// merge merges src into dst assuming they are both of type *mi.typ. -func (mi *mergeInfo) merge(dst, src pointer) { - if dst.isNil() { - panic("proto: nil destination") - } - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&mi.initialized) == 0 { - mi.computeMergeInfo() - } - - for _, fi := range mi.fields { - sfp := src.offset(fi.field) - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string - continue - } - if fi.basicWidth > 0 { - switch { - case fi.basicWidth == 1 && !*sfp.toBool(): - continue - case fi.basicWidth == 4 && *sfp.toUint32() == 0: - continue - case fi.basicWidth == 8 && *sfp.toUint64() == 0: - continue - } - } - } - - dfp := dst.offset(fi.field) - fi.merge(dfp, sfp) - } - - // TODO: Make this faster? - out := dst.asPointerTo(mi.typ).Elem() - in := src.asPointerTo(mi.typ).Elem() - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - if mi.unrecognized.IsValid() { - if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { - *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) 
- } - } -} - -func (mi *mergeInfo) computeMergeInfo() { - mi.lock.Lock() - defer mi.lock.Unlock() - if mi.initialized != 0 { - return - } - t := mi.typ - n := t.NumField() - - props := GetProperties(t) - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - mfi := mergeFieldInfo{field: toField(&f)} - tf := f.Type - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - switch tf.Kind() { - case reflect.Ptr, reflect.Slice, reflect.String: - // As a special case, we assume slices and strings are pointers - // since we know that the first field in the SliceSlice or - // StringHeader is a data pointer. - mfi.isPointer = true - case reflect.Bool: - mfi.basicWidth = 1 - case reflect.Int32, reflect.Uint32, reflect.Float32: - mfi.basicWidth = 4 - case reflect.Int64, reflect.Uint64, reflect.Float64: - mfi.basicWidth = 8 - } - } - - // Unwrap tf to get at its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + tf.Name()) - } - - switch tf.Kind() { - case reflect.Int32: - switch { - case isSlice: // E.g., []int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Slice is not defined (see pointer_reflect.go). - /* - sfsp := src.toInt32Slice() - if *sfsp != nil { - dfsp := dst.toInt32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - */ - sfs := src.getInt32Slice() - if sfs != nil { - dfs := dst.getInt32Slice() - dfs = append(dfs, sfs...) - if dfs == nil { - dfs = []int32{} - } - dst.setInt32Slice(dfs) - } - } - case isPointer: // E.g., *int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). - /* - sfpp := src.toInt32Ptr() - if *sfpp != nil { - dfpp := dst.toInt32Ptr() - if *dfpp == nil { - *dfpp = Int32(**sfpp) - } else { - **dfpp = **sfpp - } - } - */ - sfp := src.getInt32Ptr() - if sfp != nil { - dfp := dst.getInt32Ptr() - if dfp == nil { - dst.setInt32Ptr(*sfp) - } else { - *dfp = *sfp - } - } - } - default: // E.g., int32 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt32(); v != 0 { - *dst.toInt32() = v - } - } - } - case reflect.Int64: - switch { - case isSlice: // E.g., []int64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toInt64Slice() - if *sfsp != nil { - dfsp := dst.toInt64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - } - case isPointer: // E.g., *int64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toInt64Ptr() - if *sfpp != nil { - dfpp := dst.toInt64Ptr() - if *dfpp == nil { - *dfpp = Int64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., int64 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt64(); v != 0 { - *dst.toInt64() = v - } - } - } - case reflect.Uint32: - switch { - case isSlice: // E.g., []uint32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint32Slice() - if *sfsp != nil { - dfsp := dst.toUint32Slice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []uint32{} - } - } - } - case isPointer: // E.g., *uint32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint32Ptr() - if *sfpp != nil { - dfpp := dst.toUint32Ptr() - if *dfpp == nil { - *dfpp = Uint32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint32 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint32(); v != 0 { - *dst.toUint32() = v - } - } - } - case reflect.Uint64: - switch { - case isSlice: // E.g., []uint64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint64Slice() - if *sfsp != nil { - dfsp := dst.toUint64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint64{} - } - } - } - case isPointer: // E.g., *uint64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint64Ptr() - if *sfpp != nil { - dfpp := dst.toUint64Ptr() - if *dfpp == nil { - *dfpp = Uint64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint64 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint64(); v != 0 { - *dst.toUint64() = v - } - } - } - case reflect.Float32: - switch { - case isSlice: // E.g., []float32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat32Slice() - if *sfsp != nil { - dfsp := dst.toFloat32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float32{} - } - } - } - case isPointer: // E.g., *float32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat32Ptr() - if *sfpp != nil { - dfpp := dst.toFloat32Ptr() - if *dfpp == nil { - *dfpp = Float32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float32 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat32(); v != 0 { - *dst.toFloat32() = v - } - } - } - case reflect.Float64: - switch { - case isSlice: // E.g., []float64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat64Slice() - if *sfsp != nil { - dfsp := dst.toFloat64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float64{} - } - } - } - case isPointer: // E.g., *float64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat64Ptr() - if *sfpp != nil { - dfpp := dst.toFloat64Ptr() - if *dfpp == nil { - *dfpp = Float64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float64 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat64(); v != 0 { - *dst.toFloat64() = v - } - } - } - case reflect.Bool: - switch { - case isSlice: // E.g., []bool - mfi.merge = func(dst, src pointer) { - sfsp := src.toBoolSlice() - if *sfsp != nil { - dfsp := dst.toBoolSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []bool{} - } - } - } - case isPointer: // E.g., *bool - mfi.merge = func(dst, src pointer) { - sfpp := src.toBoolPtr() - if *sfpp != nil { - dfpp := dst.toBoolPtr() - if *dfpp == nil { - *dfpp = Bool(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., bool - mfi.merge = func(dst, src pointer) { - if v := *src.toBool(); v { - *dst.toBool() = v - } - } - } - case reflect.String: - switch { - case isSlice: // E.g., []string - mfi.merge = func(dst, src pointer) { - sfsp := src.toStringSlice() - if *sfsp != nil { - dfsp := dst.toStringSlice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []string{} - } - } - } - case isPointer: // E.g., *string - mfi.merge = func(dst, src pointer) { - sfpp := src.toStringPtr() - if *sfpp != nil { - dfpp := dst.toStringPtr() - if *dfpp == nil { - *dfpp = String(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., string - mfi.merge = func(dst, src pointer) { - if v := *src.toString(); v != "" { - *dst.toString() = v - } - } - } - case reflect.Slice: - isProto3 := props.Prop[i].proto3 - switch { - case isPointer: - panic("bad pointer in byte slice case in " + tf.Name()) - case tf.Elem().Kind() != reflect.Uint8: - panic("bad element kind in byte slice case in " + tf.Name()) - case isSlice: // E.g., [][]byte - mfi.merge = func(dst, src pointer) { - sbsp := src.toBytesSlice() - if *sbsp != nil { - dbsp := dst.toBytesSlice() - for _, sb := range *sbsp { - if sb == nil { - *dbsp = append(*dbsp, nil) - } else { - *dbsp = append(*dbsp, append([]byte{}, sb...)) - } - } - if *dbsp == nil { - *dbsp = [][]byte{} - } - } - } - default: // E.g., []byte - mfi.merge = func(dst, src pointer) { - sbp := src.toBytes() - if *sbp != nil { - dbp := dst.toBytes() - if !isProto3 || len(*sbp) > 0 { - *dbp = append([]byte{}, *sbp...) - } - } - } - } - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("message field %s without pointer", tf)) - case isSlice: // E.g., []*pb.T - mi := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sps := src.getPointerSlice() - if sps != nil { - dps := dst.getPointerSlice() - for _, sp := range sps { - var dp pointer - if !sp.isNil() { - dp = valToPointer(reflect.New(tf)) - mi.merge(dp, sp) - } - dps = append(dps, dp) - } - if dps == nil { - dps = []pointer{} - } - dst.setPointerSlice(dps) - } - } - default: // E.g., *pb.T - mi := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sp := src.getPointer() - if !sp.isNil() { - dp := dst.getPointer() - if dp.isNil() { - dp = valToPointer(reflect.New(tf)) - dst.setPointer(dp) - } - mi.merge(dp, sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic("bad pointer or slice in map case in " + tf.Name()) - default: // E.g., map[K]V - mfi.merge = func(dst, src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - dm := dst.asPointerTo(tf).Elem() - if dm.IsNil() { - dm.Set(reflect.MakeMap(tf)) - } - - switch tf.Elem().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(Clone(val.Interface().(Message))) - dm.SetMapIndex(key, val) - } - case reflect.Slice: // E.g. Bytes type (e.g., []byte) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - dm.SetMapIndex(key, val) - } - default: // Basic type (e.g., string) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - dm.SetMapIndex(key, val) - } - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic("bad pointer or slice in interface case in " + tf.Name()) - default: // E.g., interface{} - // TODO: Make this faster? 
- mfi.merge = func(dst, src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - du := dst.asPointerTo(tf).Elem() - typ := su.Elem().Type() - if du.IsNil() || du.Elem().Type() != typ { - du.Set(reflect.New(typ.Elem())) // Initialize interface if empty - } - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - dv := du.Elem().Elem().Field(0) - if dv.Kind() == reflect.Ptr && dv.IsNil() { - dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - Merge(dv.Interface().(Message), sv.Interface().(Message)) - case reflect.Slice: // E.g. Bytes type (e.g., []byte) - dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) - default: // Basic type (e.g., string) - dv.Set(sv) - } - } - } - } - default: - panic(fmt.Sprintf("merger not found for type:%s", tf)) - } - mi.fields = append(mi.fields, mfi) - } - - mi.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - mi.unrecognized = toField(&f) - } - - atomic.StoreInt32(&mi.initialized, 1) -} diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go deleted file mode 100644 index 55f0340a3..000000000 --- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go +++ /dev/null @@ -1,1967 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "io" - "math" - "reflect" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// Unmarshal is the entry point from the generated .pb.go files. -// This function is not intended to be used by non-generated code. -// This function is not subject to any compatibility guarantee. 
-// msg contains a pointer to a protocol buffer struct. -// b is the data to be unmarshaled into the protocol buffer. -// a is a pointer to a place to store cached unmarshal information. -func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { - // Load the unmarshal information for this message type. - // The atomic load ensures memory consistency. - u := atomicLoadUnmarshalInfo(&a.unmarshal) - if u == nil { - // Slow path: find unmarshal info for msg, update a with it. - u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) - atomicStoreUnmarshalInfo(&a.unmarshal, u) - } - // Then do the unmarshaling. - err := u.unmarshal(toPointer(&msg), b) - return err -} - -type unmarshalInfo struct { - typ reflect.Type // type of the protobuf struct - - // 0 = only typ field is initialized - // 1 = completely initialized - initialized int32 - lock sync.Mutex // prevents double initialization - dense []unmarshalFieldInfo // fields indexed by tag # - sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # - reqFields []string // names of required fields - reqMask uint64 // 1< 0 { - // Read tag and wire type. - // Special case 1 and 2 byte varints. - var x uint64 - if b[0] < 128 { - x = uint64(b[0]) - b = b[1:] - } else if len(b) >= 2 && b[1] < 128 { - x = uint64(b[0]&0x7f) + uint64(b[1])<<7 - b = b[2:] - } else { - var n int - x, n = decodeVarint(b) - if n == 0 { - return io.ErrUnexpectedEOF - } - b = b[n:] - } - tag := x >> 3 - wire := int(x) & 7 - - // Dispatch on the tag to one of the unmarshal* functions below. - var f unmarshalFieldInfo - if tag < uint64(len(u.dense)) { - f = u.dense[tag] - } else { - f = u.sparse[tag] - } - if fn := f.unmarshal; fn != nil { - var err error - b, err = fn(b, m.offset(f.field), wire) - if err == nil { - reqMask |= f.reqMask - continue - } - if r, ok := err.(*RequiredNotSetError); ok { - // Remember this error, but keep parsing. We need to produce - // a full parse even if a required field is missing. - rnse = r - reqMask |= f.reqMask - continue - } - if err != errInternalBadWireType { - return err - } - // Fragments with bad wire type are treated as unknown fields. - } - - // Unknown tag. - if !u.unrecognized.IsValid() { - // Don't keep unrecognized data; just skip it. - var err error - b, err = skipField(b, wire) - if err != nil { - return err - } - continue - } - // Keep unrecognized data around. - // maybe in extensions, maybe in the unrecognized field. - z := m.offset(u.unrecognized).toBytes() - var emap map[int32]Extension - var e Extension - for _, r := range u.extensionRanges { - if uint64(r.Start) <= tag && tag <= uint64(r.End) { - if u.extensions.IsValid() { - mp := m.offset(u.extensions).toExtensions() - emap = mp.extensionsWrite() - e = emap[int32(tag)] - z = &e.enc - break - } - if u.oldExtensions.IsValid() { - p := m.offset(u.oldExtensions).toOldExtensions() - emap = *p - if emap == nil { - emap = map[int32]Extension{} - *p = emap - } - e = emap[int32(tag)] - z = &e.enc - break - } - panic("no extensions field available") - } - } - - // Use wire type to skip data. - var err error - b0 := b - b, err = skipField(b, wire) - if err != nil { - return err - } - *z = encodeVarint(*z, tag<<3|uint64(wire)) - *z = append(*z, b0[:len(b0)-len(b)]...) - - if emap != nil { - emap[int32(tag)] = e - } - } - if rnse != nil { - // A required field of a submessage/group is missing. Return that error. - return rnse - } - if reqMask != u.reqMask { - // A required field of this message is missing. 
- for _, n := range u.reqFields { - if reqMask&1 == 0 { - return &RequiredNotSetError{n} - } - reqMask >>= 1 - } - } - return nil -} - -// computeUnmarshalInfo fills in u with information for use -// in unmarshaling protocol buffers of type u.typ. -func (u *unmarshalInfo) computeUnmarshalInfo() { - u.lock.Lock() - defer u.lock.Unlock() - if u.initialized != 0 { - return - } - t := u.typ - n := t.NumField() - - // Set up the "not found" value for the unrecognized byte buffer. - // This is the default for proto3. - u.unrecognized = invalidField - u.extensions = invalidField - u.oldExtensions = invalidField - - // List of the generated type and offset for each oneof field. - type oneofField struct { - ityp reflect.Type // interface type of oneof field - field field // offset in containing message - } - var oneofFields []oneofField - - for i := 0; i < n; i++ { - f := t.Field(i) - if f.Name == "XXX_unrecognized" { - // The byte slice used to hold unrecognized input is special. - if f.Type != reflect.TypeOf(([]byte)(nil)) { - panic("bad type for XXX_unrecognized field: " + f.Type.Name()) - } - u.unrecognized = toField(&f) - continue - } - if f.Name == "XXX_InternalExtensions" { - // Ditto here. - if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { - panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) - } - u.extensions = toField(&f) - if f.Tag.Get("protobuf_messageset") == "1" { - u.isMessageSet = true - } - continue - } - if f.Name == "XXX_extensions" { - // An older form of the extensions field. - if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) { - panic("bad type for XXX_extensions field: " + f.Type.Name()) - } - u.oldExtensions = toField(&f) - continue - } - if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { - continue - } - - oneof := f.Tag.Get("protobuf_oneof") - if oneof != "" { - oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) - // The rest of oneof processing happens below. - continue - } - - tags := f.Tag.Get("protobuf") - tagArray := strings.Split(tags, ",") - if len(tagArray) < 2 { - panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) - } - tag, err := strconv.Atoi(tagArray[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tagArray[1]) - } - - name := "" - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - } - - // Extract unmarshaling function from the field (its type and tags). - unmarshal := fieldUnmarshaler(&f) - - // Required field? - var reqMask uint64 - if tagArray[2] == "req" { - bit := len(u.reqFields) - u.reqFields = append(u.reqFields, name) - reqMask = uint64(1) << uint(bit) - // TODO: if we have more than 64 required fields, we end up - // not verifying that all required fields are present. - // Fix this, perhaps using a count of required fields? - } - - // Store the info in the correct slot in the message. - u.setTag(tag, toField(&f), unmarshal, reqMask) - } - - // Find any types associated with oneof fields. - // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? 
- fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") - if fn.IsValid() { - res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} - for i := res.Len() - 1; i >= 0; i-- { - v := res.Index(i) // interface{} - tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X - typ := tptr.Elem() // Msg_X - - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tagstr := strings.Split(f.Tag.Get("protobuf"), ",")[1] - tag, err := strconv.Atoi(tagstr) - if err != nil { - panic("protobuf tag field not an integer: " + tagstr) - } - - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. - for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(tag, of.field, unmarshal, 0) - } - } - } - } - - // Get extension ranges, if any. - fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") - if fn.IsValid() { - if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { - panic("a message with extensions, but no extensions field in " + t.Name()) - } - u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) - } - - // Explicitly disallow tag 0. This will ensure we flag an error - // when decoding a buffer of all zeros. Without this code, we - // would decode and skip an all-zero buffer of even length. - // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. - u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { - return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) - }, 0) - - // Set mask for required field check. - u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? - for len(u.dense) <= tag { - u.dense = append(u.dense, unmarshalFieldInfo{}) - } - u.dense[tag] = i - return - } - if u.sparse == nil { - u.sparse = map[uint64]unmarshalFieldInfo{} - } - u.sparse[uint64(tag)] = i -} - -// fieldUnmarshaler returns an unmarshaler for the given field. -func fieldUnmarshaler(f *reflect.StructField) unmarshaler { - if f.Type.Kind() == reflect.Map { - return makeUnmarshalMap(f) - } - return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) -} - -// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. -func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { - tagArray := strings.Split(tags, ",") - encoding := tagArray[0] - name := "unknown" - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - } - - // Figure out packaging (pointer, slice, or both) - slice := false - pointer := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - // We'll never have both pointer and slice for basic types. 
- if pointer && slice && t.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + t.Name()) - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return unmarshalBoolPtr - } - if slice { - return unmarshalBoolSlice - } - return unmarshalBoolValue - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixedS32Ptr - } - if slice { - return unmarshalFixedS32Slice - } - return unmarshalFixedS32Value - case "varint": - // this could be int32 or enum - if pointer { - return unmarshalInt32Ptr - } - if slice { - return unmarshalInt32Slice - } - return unmarshalInt32Value - case "zigzag32": - if pointer { - return unmarshalSint32Ptr - } - if slice { - return unmarshalSint32Slice - } - return unmarshalSint32Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixedS64Ptr - } - if slice { - return unmarshalFixedS64Slice - } - return unmarshalFixedS64Value - case "varint": - if pointer { - return unmarshalInt64Ptr - } - if slice { - return unmarshalInt64Slice - } - return unmarshalInt64Value - case "zigzag64": - if pointer { - return unmarshalSint64Ptr - } - if slice { - return unmarshalSint64Slice - } - return unmarshalSint64Value - } - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixed32Ptr - } - if slice { - return unmarshalFixed32Slice - } - return unmarshalFixed32Value - case "varint": - if pointer { - return unmarshalUint32Ptr - } - if slice { - return unmarshalUint32Slice - } - return unmarshalUint32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixed64Ptr - } - if slice { - return unmarshalFixed64Slice - } - return unmarshalFixed64Value - case "varint": - if pointer { - return unmarshalUint64Ptr - } - if slice { - return unmarshalUint64Slice - } - return unmarshalUint64Value - } - case reflect.Float32: - if pointer { - return unmarshalFloat32Ptr - } - if slice { - return unmarshalFloat32Slice - } - return unmarshalFloat32Value - case reflect.Float64: - if pointer { - return unmarshalFloat64Ptr - } - if slice { - return unmarshalFloat64Slice - } - return unmarshalFloat64Value - case reflect.Map: - panic("map type in typeUnmarshaler in " + t.Name()) - case reflect.Slice: - if pointer { - panic("bad pointer in slice case in " + t.Name()) - } - if slice { - return unmarshalBytesSlice - } - return unmarshalBytesValue - case reflect.String: - if pointer { - return unmarshalStringPtr - } - if slice { - return unmarshalStringSlice - } - return unmarshalStringValue - case reflect.Struct: - // message or group field - if !pointer { - panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding)) - } - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) - case "group": - if slice { - return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) - } - } - panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) -} - -// Below are all the unmarshalers for individual fields of various types. 
- -func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64() = v - return b, nil -} - -func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64() = v - return b, nil -} - -func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64() = v - return b, nil -} - -func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64Ptr() = &v - return b, nil -} - -func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 
{ - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - *f.toInt32() = v - return b, nil -} - -func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - *f.toInt32() = v - return b, nil -} - -func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32() = v - return b, nil -} - -func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32Ptr() = &v - return b, nil -} - -func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64() = v - return b[8:], nil -} - -func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64() = v - return b[8:], nil -} - -func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32() = v - return b[4:], nil -} - -func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32Ptr() = &v - return b[4:], nil -} - -func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - *f.toInt32() = v - return b[4:], nil -} - -func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.setInt32Ptr(v) - return b[4:], nil -} - -func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - return b[4:], nil -} - -func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - // Note: any length varint is allowed, even though any sane - // encoder will use one byte. 
- // See https://github.com/golang/protobuf/issues/76 - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - // TODO: check if x>1? Tests seem to indicate no. - v := x != 0 - *f.toBool() = v - return b[n:], nil -} - -func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - *f.toBoolPtr() = &v - return b[n:], nil -} - -func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - b = b[n:] - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - return b[n:], nil -} - -func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64() = v - return b[8:], nil -} - -func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64Ptr() = &v - return b[8:], nil -} - -func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32() = v - return b[4:], nil -} - -func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - 
v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32Ptr() = &v - return b[4:], nil -} - -func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - if !utf8.ValidString(v) { - return nil, errInvalidUTF8 - } - *f.toString() = v - return b[x:], nil -} - -func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - if !utf8.ValidString(v) { - return nil, errInvalidUTF8 - } - *f.toStringPtr() = &v - return b[x:], nil -} - -func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - if !utf8.ValidString(v) { - return nil, errInvalidUTF8 - } - s := f.toStringSlice() - *s = append(*s, v) - return b[x:], nil -} - -var emptyBuf [0]byte - -func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // The use of append here is a trick which avoids the zeroing - // that would be required if we used a make/copy pair. - // We append to emptyBuf instead of nil because we want - // a non-nil result even when the length is 0. - v := append(emptyBuf[:], b[:x]...) - *f.toBytes() = v - return b[x:], nil -} - -func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := append(emptyBuf[:], b[:x]...) 
- s := f.toBytesSlice() - *s = append(*s, v) - return b[x:], nil -} - -func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[y:], err - } -} - -func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[y:], err - } -} - -func makeUnmarshalMap(f *reflect.StructField) unmarshaler { - t := f.Type - kt := t.Key() - vt := t.Elem() - unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) - unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val")) - return func(b []byte, f pointer, w int) ([]byte, error) { - // The map entry is a submessage. Figure out how big it is. - if w != WireBytes { - return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - r := b[x:] // unused data to return - b = b[:x] // data for map entry - - // Note: we could use #keys * #values ~= 200 functions - // to do map decoding without reflection. Probably not worth it. - // Maps will be somewhat slow. 
Oh well. - - // Read key and value from data. - k := reflect.New(kt) - v := reflect.New(vt) - for len(b) > 0 { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - wire := int(x) & 7 - b = b[n:] - - var err error - switch x >> 3 { - case 1: - b, err = unmarshalKey(b, valToPointer(k), wire) - case 2: - b, err = unmarshalVal(b, valToPointer(v), wire) - default: - err = errInternalBadWireType // skip unknown tag - } - - if err == nil { - continue - } - if err != errInternalBadWireType { - return nil, err - } - - // Skip past unknown fields. - b, err = skipField(b, wire) - if err != nil { - return nil, err - } - } - - // Get map, allocate if needed. - m := f.asPointerTo(t).Elem() // an addressable map[K]T - if m.IsNil() { - m.Set(reflect.MakeMap(t)) - } - - // Insert into map. - m.SetMapIndex(k.Elem(), v.Elem()) - - return r, nil - } -} - -// makeUnmarshalOneof makes an unmarshaler for oneof fields. -// for: -// message Msg { -// oneof F { -// int64 X = 1; -// float64 Y = 2; -// } -// } -// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). -// ityp is the interface type of the oneof field (e.g. isMsg_F). -// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). -// Note that this function will be called once for each case in the oneof. -func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { - sf := typ.Field(0) - field0 := toField(&sf) - return func(b []byte, f pointer, w int) ([]byte, error) { - // Allocate holder for value. - v := reflect.New(typ) - - // Unmarshal data into holder. - // We unmarshal into the first field of the holder object. - var err error - b, err = unmarshal(b, valToPointer(v).offset(field0), w) - if err != nil { - return nil, err - } - - // Write pointer to holder into target field. - f.asPointerTo(ityp).Elem().Set(v) - - return b, nil - } -} - -// Error used by decode internally. -var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") - -// skipField skips past a field of type wire and returns the remaining bytes. -func skipField(b []byte, wire int) ([]byte, error) { - switch wire { - case WireVarint: - _, k := decodeVarint(b) - if k == 0 { - return b, io.ErrUnexpectedEOF - } - b = b[k:] - case WireFixed32: - if len(b) < 4 { - return b, io.ErrUnexpectedEOF - } - b = b[4:] - case WireFixed64: - if len(b) < 8 { - return b, io.ErrUnexpectedEOF - } - b = b[8:] - case WireBytes: - m, k := decodeVarint(b) - if k == 0 || uint64(len(b)-k) < m { - return b, io.ErrUnexpectedEOF - } - b = b[uint64(k)+m:] - case WireStartGroup: - _, i := findEndGroup(b) - if i == -1 { - return b, io.ErrUnexpectedEOF - } - b = b[i:] - default: - return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) - } - return b, nil -} - -// findEndGroup finds the index of the next EndGroup tag. -// Groups may be nested, so the "next" EndGroup tag is the first -// unpaired EndGroup. -// findEndGroup returns the indexes of the start and end of the EndGroup tag. -// Returns (-1,-1) if it can't find one. 
-func findEndGroup(b []byte) (int, int) { - depth := 1 - i := 0 - for { - x, n := decodeVarint(b[i:]) - if n == 0 { - return -1, -1 - } - j := i - i += n - switch x & 7 { - case WireVarint: - _, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - case WireFixed32: - if len(b)-4 < i { - return -1, -1 - } - i += 4 - case WireFixed64: - if len(b)-8 < i { - return -1, -1 - } - i += 8 - case WireBytes: - m, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - if uint64(len(b)-i) < m { - return -1, -1 - } - i += int(m) - case WireStartGroup: - depth++ - case WireEndGroup: - depth-- - if depth == 0 { - return j, i - } - default: - return -1, -1 - } - } -} - -// encodeVarint appends a varint-encoded integer to b and returns the result. -func encodeVarint(b []byte, x uint64) []byte { - for x >= 1<<7 { - b = append(b, byte(x&0x7f|0x80)) - x >>= 7 - } - return append(b, byte(x)) -} - -// decodeVarint reads a varint-encoded integer from b. -// Returns the decoded integer and the number of bytes read. -// If there is an error, it returns 0,0. -func decodeVarint(b []byte) (uint64, int) { - var x, y uint64 - if len(b) <= 0 { - goto bad - } - x = uint64(b[0]) - if x < 0x80 { - return x, 1 - } - x -= 0x80 - - if len(b) <= 1 { - goto bad - } - y = uint64(b[1]) - x += y << 7 - if y < 0x80 { - return x, 2 - } - x -= 0x80 << 7 - - if len(b) <= 2 { - goto bad - } - y = uint64(b[2]) - x += y << 14 - if y < 0x80 { - return x, 3 - } - x -= 0x80 << 14 - - if len(b) <= 3 { - goto bad - } - y = uint64(b[3]) - x += y << 21 - if y < 0x80 { - return x, 4 - } - x -= 0x80 << 21 - - if len(b) <= 4 { - goto bad - } - y = uint64(b[4]) - x += y << 28 - if y < 0x80 { - return x, 5 - } - x -= 0x80 << 28 - - if len(b) <= 5 { - goto bad - } - y = uint64(b[5]) - x += y << 35 - if y < 0x80 { - return x, 6 - } - x -= 0x80 << 35 - - if len(b) <= 6 { - goto bad - } - y = uint64(b[6]) - x += y << 42 - if y < 0x80 { - return x, 7 - } - x -= 0x80 << 42 - - if len(b) <= 7 { - goto bad - } - y = uint64(b[7]) - x += y << 49 - if y < 0x80 { - return x, 8 - } - x -= 0x80 << 49 - - if len(b) <= 8 { - goto bad - } - y = uint64(b[8]) - x += y << 56 - if y < 0x80 { - return x, 9 - } - x -= 0x80 << 56 - - if len(b) <= 9 { - goto bad - } - y = uint64(b[9]) - x += y << 63 - if y < 2 { - return x, 10 - } - -bad: - return 0, 0 -} diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go deleted file mode 100644 index 2205fdaad..000000000 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ /dev/null @@ -1,843 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. - -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. 
- return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. 
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if name == "XXX_NoUnkeyedLiteral" { - continue - } - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.mkeyprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.mvalprop); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - - // Enums have a String method, so writeAny will work fine. - if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv.Addr() - if _, err := extendable(pv.Interface()); err == nil { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -// writeAny writes an arbitrary field. -func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - // Floats have special cases. 
- if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if v.CanAddr() { - // Calling v.Interface on a struct causes the reflect package to - // copy the entire struct. This is racy with the new Marshaler - // since we atomically update the XXX_sizecache. - // - // Thus, we retrieve a pointer to the struct if possible to avoid - // a race since v.Interface on the pointer doesn't copy the struct. - // - // If v is not addressable, then we are not worried about a race - // since it implies that the binary Marshaler cannot possibly be - // mutating this value. - v = v.Addr() - } - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - if err := tm.writeStruct(w, v); err != nil { - return err - } - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. 
- switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, err := fmt.Fprintf(w, "/* %v */\n", err) - return err - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, err := w.Write(endBraceNewline); err != nil { - return err - } - continue - } - if _, err := fmt.Fprint(w, tag); err != nil { - return err - } - if wire != WireStartGroup { - if err := w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err := w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err = w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - ep, _ := extendable(pv.Interface()) - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - m, mu := ep.extensionsRead() - if m == nil { - return nil - } - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(ep, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. 
- if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. - v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go new file mode 100644 index 000000000..4a5931009 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_decode.go @@ -0,0 +1,801 @@ +// Copyright 2010 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/prototext" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapTextUnmarshalV2 = false + +// ParseError is returned by UnmarshalText. +type ParseError struct { + Message string + + // Deprecated: Do not use. + Line, Offset int +} + +func (e *ParseError) Error() string { + if wrapTextUnmarshalV2 { + return e.Message + } + if e.Line == 1 { + return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message) + } + return fmt.Sprintf("line %d: %v", e.Line, e.Message) +} + +// UnmarshalText parses a proto text formatted string into m. +func UnmarshalText(s string, m Message) error { + if u, ok := m.(encoding.TextUnmarshaler); ok { + return u.UnmarshalText([]byte(s)) + } + + m.Reset() + mi := MessageV2(m) + + if wrapTextUnmarshalV2 { + err := prototext.UnmarshalOptions{ + AllowPartial: true, + }.Unmarshal([]byte(s), mi) + if err != nil { + return &ParseError{Message: err.Error()} + } + return checkRequiredNotSet(mi) + } else { + if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil { + return err + } + return checkRequiredNotSet(mi) + } +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) { + md := m.Descriptor() + fds := md.Fields() + + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + seen := make(map[protoreflect.FieldNumber]bool) + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + if err := p.unmarshalExtensionOrAny(m, seen); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. 
+ name := protoreflect.Name(tok.value) + fd := fds.ByName(name) + switch { + case fd == nil: + gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name)))) + if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name { + fd = gd + } + case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name: + fd = nil + case fd.IsWeak() && fd.Message().IsPlaceholder(): + fd = nil + } + if fd == nil { + typeName := string(md.FullName()) + if m, ok := m.Interface().(Message); ok { + t := reflect.TypeOf(m) + if t.Kind() == reflect.Ptr { + typeName = t.Elem().String() + } + } + return p.errorf("unknown field name %q in %v", name, typeName) + } + if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name()) + } + if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] { + return p.errorf("non-repeated field %q was repeated", fd.Name()) + } + seen[fd.Number()] = true + + // Consume any colon. + if err := p.checkForColon(fd); err != nil { + return err + } + + // Parse into the field. + v := m.Get(fd) + if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { + v = m.Mutable(fd) + } + if v, err = p.unmarshalValue(v, fd); err != nil { + return err + } + m.Set(fd, v) + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + } + return nil +} + +func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error { + name, err := p.consumeExtensionOrAnyName() + if err != nil { + return err + } + + // If it contains a slash, it's an Any type URL. + if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 { + tok := p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + + mt, err := protoregistry.GlobalTypes.FindMessageByURL(name) + if err != nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):]) + } + m2 := mt.New() + if err := p.unmarshalMessage(m2, terminator); err != nil { + return err + } + b, err := protoV2.Marshal(m2.Interface()) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err) + } + + urlFD := m.Descriptor().Fields().ByName("type_url") + valFD := m.Descriptor().Fields().ByName("value") + if seen[urlFD.Number()] { + return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name()) + } + if seen[valFD.Number()] { + return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name()) + } + m.Set(urlFD, protoreflect.ValueOfString(name)) + m.Set(valFD, protoreflect.ValueOfBytes(b)) + seen[urlFD.Number()] = true + seen[valFD.Number()] = true + return nil + } + + xname := protoreflect.FullName(name) + xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) + if xt == nil && isMessageSet(m.Descriptor()) { + xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) + } + if xt == nil { + return p.errorf("unrecognized extension %q", name) + } + fd := xt.TypeDescriptor() + if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { + return p.errorf("extension field %q does not extend message %q", 
name, m.Descriptor().FullName()) + } + + if err := p.checkForColon(fd); err != nil { + return err + } + + v := m.Get(fd) + if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { + v = m.Mutable(fd) + } + v, err = p.unmarshalValue(v, fd) + if err != nil { + return err + } + m.Set(fd, v) + return p.consumeOptionalSeparator() +} + +func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "" { + return v, p.errorf("unexpected EOF") + } + + switch { + case fd.IsList(): + lv := v.List() + var err error + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + vv := lv.NewElement() + vv, err = p.unmarshalSingularValue(vv, fd) + if err != nil { + return v, err + } + lv.Append(vv) + + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "]" { + break + } + if tok.value != "," { + return v, p.errorf("Expected ']' or ',' found %q", tok.value) + } + } + return v, nil + } + + // One value of the repeated field. + p.back() + vv := lv.NewElement() + vv, err = p.unmarshalSingularValue(vv, fd) + if err != nil { + return v, err + } + lv.Append(vv) + return v, nil + case fd.IsMap(): + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return v, p.errorf("expected '{' or '<', found %q", tok.value) + } + + keyFD := fd.MapKey() + valFD := fd.MapValue() + + mv := v.Map() + kv := keyFD.Default() + vv := mv.NewValue() + for { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == terminator { + break + } + var err error + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return v, err + } + if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil { + return v, err + } + if err := p.consumeOptionalSeparator(); err != nil { + return v, err + } + case "value": + if err := p.checkForColon(valFD); err != nil { + return v, err + } + if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil { + return v, err + } + if err := p.consumeOptionalSeparator(); err != nil { + return v, err + } + default: + p.back() + return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + mv.Set(kv.MapKey(), vv) + return v, nil + default: + p.back() + return p.unmarshalSingularValue(v, fd) + } +} + +func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "" { + return v, p.errorf("unexpected EOF") + } + + switch fd.Kind() { + case protoreflect.BoolKind: + switch tok.value { + case "true", "1", "t", "True": + return protoreflect.ValueOfBool(true), nil + case "false", "0", "f", "False": + return protoreflect.ValueOfBool(false), nil + } + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfInt32(int32(x)), nil + } + + // The C++ parser accepts large positive hex numbers that uses + // two's complement arithmetic to represent negative numbers. 
+ // This feature is here for backwards compatibility with C++. + if strings.HasPrefix(tok.value, "0x") { + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil + } + } + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfInt64(int64(x)), nil + } + + // The C++ parser accepts large positive hex numbers that uses + // two's complement arithmetic to represent negative numbers. + // This feature is here for backwards compatibility with C++. + if strings.HasPrefix(tok.value, "0x") { + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil + } + } + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfUint32(uint32(x)), nil + } + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfUint64(uint64(x)), nil + } + case protoreflect.FloatKind: + // Ignore 'f' for compatibility with output generated by C++, + // but don't remove 'f' when the value is "-inf" or "inf". + v := tok.value + if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { + v = v[:len(v)-len("f")] + } + if x, err := strconv.ParseFloat(v, 32); err == nil { + return protoreflect.ValueOfFloat32(float32(x)), nil + } + case protoreflect.DoubleKind: + // Ignore 'f' for compatibility with output generated by C++, + // but don't remove 'f' when the value is "-inf" or "inf". + v := tok.value + if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { + v = v[:len(v)-len("f")] + } + if x, err := strconv.ParseFloat(v, 64); err == nil { + return protoreflect.ValueOfFloat64(float64(x)), nil + } + case protoreflect.StringKind: + if isQuote(tok.value[0]) { + return protoreflect.ValueOfString(tok.unquoted), nil + } + case protoreflect.BytesKind: + if isQuote(tok.value[0]) { + return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil + } + case protoreflect.EnumKind: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil + } + vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value)) + if vd != nil { + return protoreflect.ValueOfEnum(vd.Number()), nil + } + case protoreflect.MessageKind, protoreflect.GroupKind: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return v, p.errorf("expected '{' or '<', found %q", tok.value) + } + err := p.unmarshalMessage(v.Message(), terminator) + return v, err + default: + panic(fmt.Sprintf("invalid kind %v", fd.Kind())) + } + return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value) +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + if fd.Message() == nil { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +// consumeExtensionOrAnyName consumes an extension name or an Any type URL and +// the following ']'. It returns the name or URL consumed. 
+func (p *textParser) consumeExtensionOrAnyName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in unmarshalMessage to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. 
+ cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +var errBadUTF8 = errors.New("proto: bad UTF-8") + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(i), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go new 
file mode 100644 index 000000000..a31134eeb --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_encode.go @@ -0,0 +1,560 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "bytes" + "encoding" + "fmt" + "io" + "math" + "sort" + "strings" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapTextMarshalV2 = false + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line) + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes the proto text format of m to w. +func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error { + b, err := tm.marshal(m) + if len(b) > 0 { + if _, err := w.Write(b); err != nil { + return err + } + } + return err +} + +// Text returns a proto text formatted string of m. +func (tm *TextMarshaler) Text(m Message) string { + b, _ := tm.marshal(m) + return string(b) +} + +func (tm *TextMarshaler) marshal(m Message) ([]byte, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return []byte(""), nil + } + + if wrapTextMarshalV2 { + if m, ok := m.(encoding.TextMarshaler); ok { + return m.MarshalText() + } + + opts := prototext.MarshalOptions{ + AllowPartial: true, + EmitUnknown: true, + } + if !tm.Compact { + opts.Indent = " " + } + if !tm.ExpandAny { + opts.Resolver = (*protoregistry.Types)(nil) + } + return opts.Marshal(mr.Interface()) + } else { + w := &textWriter{ + compact: tm.Compact, + expandAny: tm.ExpandAny, + complete: true, + } + + if m, ok := m.(encoding.TextMarshaler); ok { + b, err := m.MarshalText() + if err != nil { + return nil, err + } + w.Write(b) + return w.buf, nil + } + + err := w.writeMessage(mr) + return w.buf, err + } +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// MarshalText writes the proto text format of m to w. +func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) } + +// MarshalTextString returns a proto text formatted string of m. +func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) } + +// CompactText writes the compact proto text format of m to w. +func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) } + +// CompactTextString returns a compact proto text formatted string of m. +func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) } + +var ( + newline = []byte("\n") + endBraceNewline = []byte("}\n") + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + compact bool // same as TextMarshaler.Compact + expandAny bool // same as TextMarshaler.ExpandAny + complete bool // whether the current position is a complete line + indent int // indentation level; never negative + buf []byte +} + +func (w *textWriter) Write(p []byte) (n int, _ error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + w.buf = append(w.buf, p...) 
+ w.complete = false + return len(p), nil + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + w.buf = append(w.buf, ' ') + n++ + } + w.buf = append(w.buf, frag...) + n += len(frag) + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + w.buf = append(w.buf, frag...) + n += len(frag) + if i+1 < len(frags) { + w.buf = append(w.buf, '\n') + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + w.buf = append(w.buf, c) + w.complete = c == '\n' + return nil +} + +func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + + if fd.Kind() != protoreflect.GroupKind { + w.buf = append(w.buf, fd.Name()...) + w.WriteByte(':') + } else { + // Use message type name for group field name. + w.buf = append(w.buf, fd.Message().Name()...) + } + + if !w.compact { + w.WriteByte(' ') + } +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
+func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) { + md := m.Descriptor() + fdURL := md.Fields().ByName("type_url") + fdVal := md.Fields().ByName("value") + + url := m.Get(fdURL).String() + mt, err := protoregistry.GlobalTypes.FindMessageByURL(url) + if err != nil { + return false, nil + } + + b := m.Get(fdVal).Bytes() + m2 := mt.New() + if err := proto.Unmarshal(b, m2.Interface()); err != nil { + return false, nil + } + w.Write([]byte("[")) + if requiresQuotes(url) { + w.writeQuotedString(url) + } else { + w.Write([]byte(url)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.indent++ + } + if err := w.writeMessage(m2); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.indent-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (w *textWriter) writeMessage(m protoreflect.Message) error { + md := m.Descriptor() + if w.expandAny && md.FullName() == "google.protobuf.Any" { + if canExpand, err := w.writeProto3Any(m); canExpand { + return err + } + } + + fds := md.Fields() + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + } else { + i++ + } + if fd == nil || !m.Has(fd) { + continue + } + + switch { + case fd.IsList(): + lv := m.Get(fd).List() + for j := 0; j < lv.Len(); j++ { + w.writeName(fd) + v := lv.Get(j) + if err := w.writeSingularValue(v, fd); err != nil { + return err + } + w.WriteByte('\n') + } + case fd.IsMap(): + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := m.Get(fd).Map() + + type entry struct{ key, val protoreflect.Value } + var entries []entry + mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + entries = append(entries, entry{k.Value(), v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + switch kfd.Kind() { + case protoreflect.BoolKind: + return !entries[i].key.Bool() && entries[j].key.Bool() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return entries[i].key.Int() < entries[j].key.Int() + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return entries[i].key.Uint() < entries[j].key.Uint() + case protoreflect.StringKind: + return entries[i].key.String() < entries[j].key.String() + default: + panic("invalid kind") + } + }) + for _, entry := range entries { + w.writeName(fd) + w.WriteByte('<') + if !w.compact { + w.WriteByte('\n') + } + w.indent++ + w.writeName(kfd) + if err := w.writeSingularValue(entry.key, kfd); err != nil { + return err + } + w.WriteByte('\n') + w.writeName(vfd) + if err := w.writeSingularValue(entry.val, vfd); err != nil { + return err + } + w.WriteByte('\n') + w.indent-- + w.WriteByte('>') + w.WriteByte('\n') + } + default: + w.writeName(fd) + if err := w.writeSingularValue(m.Get(fd), fd); err != nil { + return err + } + w.WriteByte('\n') + } + } + + if b := m.GetUnknown(); len(b) > 0 { + w.writeUnknownFields(b) + } + return w.writeExtensions(m) +} + +func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error { + switch fd.Kind() { + case protoreflect.FloatKind, protoreflect.DoubleKind: + switch vf := v.Float(); { + case math.IsInf(vf, +1): + w.Write(posInf) + case math.IsInf(vf, -1): + w.Write(negInf) + case math.IsNaN(vf): + w.Write(nan) + default: + fmt.Fprint(w, v.Interface()) + } + case 
protoreflect.StringKind: + // NOTE: This does not validate UTF-8 for historical reasons. + w.writeQuotedString(string(v.String())) + case protoreflect.BytesKind: + w.writeQuotedString(string(v.Bytes())) + case protoreflect.MessageKind, protoreflect.GroupKind: + var bra, ket byte = '<', '>' + if fd.Kind() == protoreflect.GroupKind { + bra, ket = '{', '}' + } + w.WriteByte(bra) + if !w.compact { + w.WriteByte('\n') + } + w.indent++ + m := v.Message() + if m2, ok := m.Interface().(encoding.TextMarshaler); ok { + b, err := m2.MarshalText() + if err != nil { + return err + } + w.Write(b) + } else { + w.writeMessage(m) + } + w.indent-- + w.WriteByte(ket) + case protoreflect.EnumKind: + if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil { + fmt.Fprint(w, ev.Name()) + } else { + fmt.Fprint(w, v.Enum()) + } + default: + fmt.Fprint(w, v.Interface()) + } + return nil +} + +// writeQuotedString writes a quoted string in the protocol buffer text format. +func (w *textWriter) writeQuotedString(s string) { + w.WriteByte('"') + for i := 0; i < len(s); i++ { + switch c := s[i]; c { + case '\n': + w.buf = append(w.buf, `\n`...) + case '\r': + w.buf = append(w.buf, `\r`...) + case '\t': + w.buf = append(w.buf, `\t`...) + case '"': + w.buf = append(w.buf, `\"`...) + case '\\': + w.buf = append(w.buf, `\\`...) + default: + if isPrint := c >= 0x20 && c < 0x7f; isPrint { + w.buf = append(w.buf, c) + } else { + w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...) + } + } + } + w.WriteByte('"') +} + +func (w *textWriter) writeUnknownFields(b []byte) { + if !w.compact { + fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b)) + } + + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return + } + b = b[n:] + + if wtyp == protowire.EndGroupType { + w.indent-- + w.Write(endBraceNewline) + continue + } + fmt.Fprint(w, num) + if wtyp != protowire.StartGroupType { + w.WriteByte(':') + } + if !w.compact || wtyp == protowire.StartGroupType { + w.WriteByte(' ') + } + switch wtyp { + case protowire.VarintType: + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.Fixed32Type: + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.Fixed64Type: + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.BytesType: + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprintf(w, "%q", v) + case protowire.StartGroupType: + w.WriteByte('{') + w.indent++ + default: + fmt.Fprintf(w, "/* unknown wire type %d */", wtyp) + } + w.WriteByte('\n') + } +} + +// writeExtensions writes all the extensions in m. +func (w *textWriter) writeExtensions(m protoreflect.Message) error { + md := m.Descriptor() + if md.ExtensionRanges().Len() == 0 { + return nil + } + + type ext struct { + desc protoreflect.FieldDescriptor + val protoreflect.Value + } + var exts []ext + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + exts = append(exts, ext{fd, v}) + } + return true + }) + sort.Slice(exts, func(i, j int) bool { + return exts[i].desc.Number() < exts[j].desc.Number() + }) + + for _, ext := range exts { + // For message set, use the name of the message as the extension name. 
+ name := string(ext.desc.FullName()) + if isMessageSet(ext.desc.ContainingMessage()) { + name = strings.TrimSuffix(name, ".message_set_extension") + } + + if !ext.desc.IsList() { + if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil { + return err + } + } else { + lv := ext.val.List() + for i := 0; i < lv.Len(); i++ { + if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil { + return err + } + } + } + } + return nil +} + +func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error { + fmt.Fprintf(w, "[%s]:", name) + if !w.compact { + w.WriteByte(' ') + } + if err := w.writeSingularValue(v, fd); err != nil { + return err + } + w.WriteByte('\n') + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + for i := 0; i < w.indent*2; i++ { + w.buf = append(w.buf, ' ') + } + w.complete = false +} diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go deleted file mode 100644 index 0685bae36..000000000 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ /dev/null @@ -1,880 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. 
- -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} 
- -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) - } - return string(i), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. 
- cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. -func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. 
- messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. - for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. 
- if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.mkeyprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.mvalprop); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. 
-func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. - return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "]" { - break - } - if tok.value != "," { - return p.errorf("Expected ']' or ',' found %q", tok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. - switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". - if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. 
UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. -func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - return um.UnmarshalText([]byte(s)) - } - pb.Reset() - v := reflect.ValueOf(pb) - return newTextParser(s).readStruct(v.Elem(), "") -} diff --git a/vendor/github.com/golang/protobuf/proto/wire.go b/vendor/github.com/golang/protobuf/proto/wire.go new file mode 100644 index 000000000..d7c28da5a --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/wire.go @@ -0,0 +1,78 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/runtime/protoiface" +) + +// Size returns the size in bytes of the wire-format encoding of m. +func Size(m Message) int { + if m == nil { + return 0 + } + mi := MessageV2(m) + return protoV2.Size(mi) +} + +// Marshal returns the wire-format encoding of m. +func Marshal(m Message) ([]byte, error) { + b, err := marshalAppend(nil, m, false) + if b == nil { + b = zeroBytes + } + return b, err +} + +var zeroBytes = make([]byte, 0, 0) + +func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) { + if m == nil { + return nil, ErrNil + } + mi := MessageV2(m) + nbuf, err := protoV2.MarshalOptions{ + Deterministic: deterministic, + AllowPartial: true, + }.MarshalAppend(buf, mi) + if err != nil { + return buf, err + } + if len(buf) == len(nbuf) { + if !mi.ProtoReflect().IsValid() { + return buf, ErrNil + } + } + return nbuf, checkRequiredNotSet(mi) +} + +// Unmarshal parses a wire-format message in b and places the decoded results in m. +// +// Unmarshal resets m before starting to unmarshal, so any existing data in m is always +// removed. Use UnmarshalMerge to preserve and append to existing data. +func Unmarshal(b []byte, m Message) error { + m.Reset() + return UnmarshalMerge(b, m) +} + +// UnmarshalMerge parses a wire-format message in b and places the decoded results in m. +func UnmarshalMerge(b []byte, m Message) error { + mi := MessageV2(m) + out, err := protoV2.UnmarshalOptions{ + AllowPartial: true, + Merge: true, + }.UnmarshalState(protoiface.UnmarshalInput{ + Buf: b, + Message: mi.ProtoReflect(), + }) + if err != nil { + return err + } + if out.Flags&protoiface.UnmarshalInitialized > 0 { + return nil + } + return checkRequiredNotSet(mi) +} diff --git a/vendor/github.com/golang/protobuf/proto/wrappers.go b/vendor/github.com/golang/protobuf/proto/wrappers.go new file mode 100644 index 000000000..398e34859 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/wrappers.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +// Bool stores v in a new bool value and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int stores v in a new int32 value and returns a pointer to it. +// +// Deprecated: Use Int32 instead. +func Int(v int) *int32 { return Int32(int32(v)) } + +// Int32 stores v in a new int32 value and returns a pointer to it. +func Int32(v int32) *int32 { return &v } + +// Int64 stores v in a new int64 value and returns a pointer to it. 
+func Int64(v int64) *int64 { return &v } + +// Uint32 stores v in a new uint32 value and returns a pointer to it. +func Uint32(v uint32) *uint32 { return &v } + +// Uint64 stores v in a new uint64 value and returns a pointer to it. +func Uint64(v uint64) *uint64 { return &v } + +// Float32 stores v in a new float32 value and returns a pointer to it. +func Float32(v float32) *float32 { return &v } + +// Float64 stores v in a new float64 value and returns a pointer to it. +func Float64(v float64) *float64 { return &v } + +// String stores v in a new string value and returns a pointer to it. +func String(v string) *string { return &v } diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml new file mode 100644 index 000000000..d8156a60b --- /dev/null +++ b/vendor/github.com/google/uuid/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.4.3 + - 1.5.3 + - tip + +script: + - go test -v ./... diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md new file mode 100644 index 000000000..04fdf09f1 --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -0,0 +1,10 @@ +# How to contribute + +We definitely welcome patches and contribution to this project! + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS new file mode 100644 index 000000000..b4bb97f6b --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTORS @@ -0,0 +1,9 @@ +Paul Borman +bmatsuo +shawnps +theory +jboverfelt +dsymonds +cd1 +wallclockbuilder +dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE new file mode 100644 index 000000000..5dc68268d --- /dev/null +++ b/vendor/github.com/google/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
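For orientation, here is a minimal usage sketch of the public proto API that the vendored changes above expose: the forwarding Marshal/Unmarshal shims in wire.go, the text-format helpers, and the scalar pointer helpers in wrappers.go. This is an illustrative sketch only, assuming the ptypes/wrappers well-known message is importable alongside the vendored proto package; it is not part of the vendored files themselves.

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	in := &wrappers.StringValue{Value: "pact-go"}

	// Wire-format round trip via the shims in wire.go, which forward to
	// the google.golang.org/protobuf implementation.
	b, err := proto.Marshal(in)
	if err != nil {
		log.Fatalf("marshal: %v", err)
	}
	out := &wrappers.StringValue{}
	if err := proto.Unmarshal(b, out); err != nil {
		log.Fatalf("unmarshal: %v", err)
	}
	fmt.Println(out.Value) // "pact-go"

	// Text-format round trip through the public text helpers.
	txt := proto.MarshalTextString(in)
	var back wrappers.StringValue
	if err := proto.UnmarshalText(txt, &back); err != nil {
		log.Fatalf("unmarshal text: %v", err)
	}
	fmt.Println(back.Value)

	// Scalar helpers from wrappers.go build pointers for optional proto2 fields.
	name, count := proto.String("example"), proto.Int32(3)
	fmt.Println(*name, *count)
}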
diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md new file mode 100644 index 000000000..f765a46f9 --- /dev/null +++ b/vendor/github.com/google/uuid/README.md @@ -0,0 +1,19 @@ +# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +The uuid package generates and inspects UUIDs based on +[RFC 4122](http://tools.ietf.org/html/rfc4122) +and DCE 1.1: Authentication and Security Services. + +This package is based on the github.com/pborman/uuid package (previously named +code.google.com/p/go-uuid). It differs from these earlier packages in that +a UUID is a 16 byte array rather than a byte slice. One loss due to this +change is the ability to represent an invalid UUID (vs a NIL UUID). + +###### Install +`go get github.com/google/uuid` + +###### Documentation +[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) + +Full `go doc` style documentation for the package can be viewed online without +installing this package by using the GoDoc site here: +http://pkg.go.dev/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go new file mode 100644 index 000000000..fa820b9d3 --- /dev/null +++ b/vendor/github.com/google/uuid/dce.go @@ -0,0 +1,80 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) (UUID, error) { + uuid, err := NewUUID() + if err == nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid, err +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCESecurity(Person, uint32(os.Getuid())) +func NewDCEPerson() (UUID, error) { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCESecurity(Group, uint32(os.Getgid())) +func NewDCEGroup() (UUID, error) { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID. Domains are only defined +// for Version 2 UUIDs. +func (uuid UUID) Domain() Domain { + return Domain(uuid[9]) +} + +// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 +// UUIDs. 
+func (uuid UUID) ID() uint32 { + return binary.BigEndian.Uint32(uuid[0:4]) +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go new file mode 100644 index 000000000..5b8a4b9af --- /dev/null +++ b/vendor/github.com/google/uuid/doc.go @@ -0,0 +1,12 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uuid generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to +// maps or compared directly. +package uuid diff --git a/vendor/github.com/google/uuid/go.mod b/vendor/github.com/google/uuid/go.mod new file mode 100644 index 000000000..fc84cd79d --- /dev/null +++ b/vendor/github.com/google/uuid/go.mod @@ -0,0 +1 @@ +module github.com/google/uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go new file mode 100644 index 000000000..b404f4bec --- /dev/null +++ b/vendor/github.com/google/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known namespace IDs and UUIDs +var ( + NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) + Nil UUID // empty UUID, all zeros +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space[:]) //nolint:errcheck + h.Write(data) //nolint:errcheck + s := h.Sum(nil) + var uuid UUID + copy(uuid[:], s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go new file mode 100644 index 000000000..14bd34072 --- /dev/null +++ b/vendor/github.com/google/uuid/marshal.go @@ -0,0 +1,38 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import "fmt" + +// MarshalText implements encoding.TextMarshaler. +func (uuid UUID) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], uuid) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (uuid *UUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + return err + } + *uuid = id + return nil +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (uuid UUID) MarshalBinary() ([]byte, error) { + return uuid[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (uuid *UUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(uuid[:], data) + return nil +} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go new file mode 100644 index 000000000..d651a2b06 --- /dev/null +++ b/vendor/github.com/google/uuid/node.go @@ -0,0 +1,90 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "sync" +) + +var ( + nodeMu sync.Mutex + ifname string // name of interface being used + nodeID [6]byte // hardware for version 1 UUIDs + zeroID [6]byte // nodeID with only 0's +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + iname, addr := getHardwareInterface(name) // null implementation for js + if iname != "" && addr != nil { + ifname = iname + copy(nodeID[:], addr) + return true + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + ifname = "random" + randomBits(nodeID[:]) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nid := nodeID + return nid[:] +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + defer nodeMu.Unlock() + nodeMu.Lock() + copy(nodeID[:], id) + ifname = "user" + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. 
+func (uuid UUID) NodeID() []byte { + var node [6]byte + copy(node[:], uuid[10:]) + return node[:] +} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go new file mode 100644 index 000000000..24b78edc9 --- /dev/null +++ b/vendor/github.com/google/uuid/node_js.go @@ -0,0 +1,12 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build js + +package uuid + +// getHardwareInterface returns nil values for the JS version of the code. +// This remvoves the "net" dependency, because it is not used in the browser. +// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. +func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go new file mode 100644 index 000000000..0cbbcddbd --- /dev/null +++ b/vendor/github.com/google/uuid/node_net.go @@ -0,0 +1,33 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !js + +package uuid + +import "net" + +var interfaces []net.Interface // cached list of interfaces + +// getHardwareInterface returns the name and hardware address of interface name. +// If name is "" then the name and hardware address of one of the system's +// interfaces is returned. If no interfaces are found (name does not exist or +// there are no interfaces) then "", nil is returned. +// +// Only addresses of at least 6 bytes are returned. +func getHardwareInterface(name string) (string, []byte) { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil { + return "", nil + } + } + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + return ifs.Name, ifs.HardwareAddr + } + } + return "", nil +} diff --git a/vendor/github.com/google/uuid/null.go b/vendor/github.com/google/uuid/null.go new file mode 100644 index 000000000..d7fcbf286 --- /dev/null +++ b/vendor/github.com/google/uuid/null.go @@ -0,0 +1,118 @@ +// Copyright 2021 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "fmt" +) + +var jsonNull = []byte("null") + +// NullUUID represents a UUID that may be null. +// NullUUID implements the SQL driver.Scanner interface so +// it can be used as a scan destination: +// +// var u uuid.NullUUID +// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u) +// ... +// if u.Valid { +// // use u.UUID +// } else { +// // NULL value +// } +// +type NullUUID struct { + UUID UUID + Valid bool // Valid is true if UUID is not NULL +} + +// Scan implements the SQL driver.Scanner interface. +func (nu *NullUUID) Scan(value interface{}) error { + if value == nil { + nu.UUID, nu.Valid = Nil, false + return nil + } + + err := nu.UUID.Scan(value) + if err != nil { + nu.Valid = false + return err + } + + nu.Valid = true + return nil +} + +// Value implements the driver Valuer interface. +func (nu NullUUID) Value() (driver.Value, error) { + if !nu.Valid { + return nil, nil + } + // Delegate to UUID Value function + return nu.UUID.Value() +} + +// MarshalBinary implements encoding.BinaryMarshaler. 
+func (nu NullUUID) MarshalBinary() ([]byte, error) { + if nu.Valid { + return nu.UUID[:], nil + } + + return []byte(nil), nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (nu *NullUUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(nu.UUID[:], data) + nu.Valid = true + return nil +} + +// MarshalText implements encoding.TextMarshaler. +func (nu NullUUID) MarshalText() ([]byte, error) { + if nu.Valid { + return nu.UUID.MarshalText() + } + + return jsonNull, nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (nu *NullUUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err != nil { + nu.Valid = false + return err + } + nu.UUID = id + nu.Valid = true + return nil +} + +// MarshalJSON implements json.Marshaler. +func (nu NullUUID) MarshalJSON() ([]byte, error) { + if nu.Valid { + return json.Marshal(nu.UUID) + } + + return jsonNull, nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (nu *NullUUID) UnmarshalJSON(data []byte) error { + if bytes.Equal(data, jsonNull) { + *nu = NullUUID{} + return nil // valid null UUID + } + err := json.Unmarshal(data, &nu.UUID) + nu.Valid = err == nil + return err +} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go new file mode 100644 index 000000000..2e02ec06c --- /dev/null +++ b/vendor/github.com/google/uuid/sql.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently. +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types. +func (uuid *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case nil: + return nil + + case string: + // if an empty UUID comes from a table, we return a null UUID + if src == "" { + return nil + } + + // see Parse for required string format + u, err := Parse(src) + if err != nil { + return fmt.Errorf("Scan: %v", err) + } + + *uuid = u + + case []byte: + // if an empty UUID comes from a table, we return a null UUID + if len(src) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(src) != 16 { + return uuid.Scan(string(src)) + } + copy((*uuid)[:], src) + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. +func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go new file mode 100644 index 000000000..e6ef06cdc --- /dev/null +++ b/vendor/github.com/google/uuid/time.go @@ -0,0 +1,123 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. 
+type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs +) + +var ( + timeMu sync.Mutex + lasttime uint64 // last time we returned + clockSeq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. +func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. +func GetTime() (Time, uint16, error) { + defer timeMu.Unlock() + timeMu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. + if clockSeq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clockSeq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence is used, a new +// random clock sequence is generated the first time a clock sequence is +// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clockSeq == 0 { + setClockSequence(-1) + } + return int(clockSeq & 0x3fff) +} + +// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + oldSeq := clockSeq + clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if oldSeq != clockSeq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. The time is only defined for version 1 and 2 UUIDs. +func (uuid UUID) Time() Time { + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time) +} + +// ClockSequence returns the clock sequence encoded in uuid. +// The clock sequence is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) ClockSequence() int { + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff +} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go new file mode 100644 index 000000000..5ea6c7378 --- /dev/null +++ b/vendor/github.com/google/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. +var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts hex characters x1 and x2 into a byte. +func xtob(x1, x2 byte) (byte, bool) { + b1 := xvalues[x1] + b2 := xvalues[x2] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go new file mode 100644 index 000000000..a57207aeb --- /dev/null +++ b/vendor/github.com/google/uuid/uuid.go @@ -0,0 +1,294 @@ +// Copyright 2018 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" + "sync" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID [16]byte + +// A Version represents a UUID's version. +type Version byte + +// A Variant represents a UUID's variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. 
+) + +const randPoolSize = 16 * 16 + +var ( + rander = rand.Reader // random function + poolEnabled = false + poolMu sync.Mutex + poolPos = randPoolSize // protected with poolMu + pool [randPoolSize]byte // protected with poolMu +) + +type invalidLengthError struct{ len int } + +func (err invalidLengthError) Error() string { + return fmt.Sprintf("invalid UUID length: %d", err.len) +} + +// IsInvalidLengthError is matcher function for custom error invalidLengthError +func IsInvalidLengthError(err error) bool { + _, ok := err.(invalidLengthError) + return ok +} + +// Parse decodes s into a UUID or returns an error. Both the standard UUID +// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the +// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex +// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. +func Parse(s string) (UUID, error) { + var uuid UUID + switch len(s) { + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36: + + // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: + if strings.ToLower(s[:9]) != "urn:uuid:" { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + case 36 + 2: + s = s[1:] + + // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + case 32: + var ok bool + for i := range uuid { + uuid[i], ok = xtob(s[i*2], s[i*2+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, invalidLengthError{len(s)} + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(s[x], s[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. +func ParseBytes(b []byte) (UUID, error) { + var uuid UUID + switch len(b) { + case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] + case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + b = b[1:] + case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + var ok bool + for i := 0; i < 32; i += 2 { + uuid[i/2], ok = xtob(b[i], b[i+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, invalidLengthError{len(b)} + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(b[x], b[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// MustParse is like Parse but panics if the string cannot be parsed. +// It simplifies safe initialization of global variables holding compiled UUIDs. 
+func MustParse(s string) UUID { + uuid, err := Parse(s) + if err != nil { + panic(`uuid: Parse(` + s + `): ` + err.Error()) + } + return uuid +} + +// FromBytes creates a new UUID from a byte slice. Returns an error if the slice +// does not have a length of 16. The bytes are copied from the slice. +func FromBytes(b []byte) (uuid UUID, err error) { + err = uuid.UnmarshalBinary(b) + return uuid, err +} + +// Must returns uuid if err is nil and panics otherwise. +func Must(uuid UUID, err error) UUID { + if err != nil { + panic(err) + } + return uuid +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. +func (uuid UUID) URN() string { + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst, uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) +} + +// Variant returns the variant encoded in uuid. +func (uuid UUID) Variant() Variant { + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } +} + +// Version returns the version of uuid. +func (uuid UUID) Version() Version { + return Version(uuid[6] >> 4) +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implements io.Reader. +// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. +func SetRand(r io.Reader) { + if r == nil { + rander = rand.Reader + return + } + rander = r +} + +// EnableRandPool enables internal randomness pool used for Random +// (Version 4) UUID generation. The pool contains random bytes read from +// the random number generator on demand in batches. Enabling the pool +// may improve the UUID generation throughput significantly. +// +// Since the pool is stored on the Go heap, this feature may be a bad fit +// for security sensitive applications. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. +func EnableRandPool() { + poolEnabled = true +} + +// DisableRandPool disables the randomness pool if it was previously +// enabled with EnableRandPool. +// +// Both EnableRandPool and DisableRandPool are not thread-safe and should +// only be called when there is no possibility that New or any other +// UUID Version 4 generation function will be called concurrently. 
+func DisableRandPool() { + poolEnabled = false + defer poolMu.Unlock() + poolMu.Lock() + poolPos = randPoolSize +} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go new file mode 100644 index 000000000..463109629 --- /dev/null +++ b/vendor/github.com/google/uuid/version1.go @@ -0,0 +1,44 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewUUID returns nil. If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewUUID returns nil and an error. +// +// In most cases, New should be used. +func NewUUID() (UUID, error) { + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + timeLow := uint32(now & 0xffffffff) + timeMid := uint16((now >> 32) & 0xffff) + timeHi := uint16((now >> 48) & 0x0fff) + timeHi |= 0x1000 // Version 1 + + binary.BigEndian.PutUint32(uuid[0:], timeLow) + binary.BigEndian.PutUint16(uuid[4:], timeMid) + binary.BigEndian.PutUint16(uuid[6:], timeHi) + binary.BigEndian.PutUint16(uuid[8:], seq) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go new file mode 100644 index 000000000..7697802e4 --- /dev/null +++ b/vendor/github.com/google/uuid/version4.go @@ -0,0 +1,76 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "io" + +// New creates a new random UUID or panics. New is equivalent to +// the expression +// +// uuid.Must(uuid.NewRandom()) +func New() UUID { + return Must(NewRandom()) +} + +// NewString creates a new random UUID and returns it as a string or panics. +// NewString is equivalent to the expression +// +// uuid.New().String() +func NewString() string { + return Must(NewRandom()).String() +} + +// NewRandom returns a Random (Version 4) UUID. +// +// The strength of the UUIDs is based on the strength of the crypto/rand +// package. +// +// Uses the randomness pool if it was enabled with EnableRandPool. +// +// A note about uniqueness derived from the UUID Wikipedia entry: +// +// Randomly generated UUIDs have 122 random bits. One's annual risk of being +// hit by a meteorite is estimated to be one chance in 17 billion, that +// means the probability is about 0.00000000006 (6 × 10−11), +// equivalent to the odds of creating a few tens of trillions of UUIDs in a +// year and having one duplicate. +func NewRandom() (UUID, error) { + if !poolEnabled { + return NewRandomFromReader(rander) + } + return newRandomFromPool() +} + +// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader. 
+func NewRandomFromReader(r io.Reader) (UUID, error) { + var uuid UUID + _, err := io.ReadFull(r, uuid[:]) + if err != nil { + return Nil, err + } + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} + +func newRandomFromPool() (UUID, error) { + var uuid UUID + poolMu.Lock() + if poolPos == randPoolSize { + _, err := io.ReadFull(rander, pool[:]) + if err != nil { + poolMu.Unlock() + return Nil, err + } + poolPos = 0 + } + copy(uuid[:], pool[poolPos:(poolPos+16)]) + poolPos += 16 + poolMu.Unlock() + + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} diff --git a/vendor/github.com/graph-gophers/graphql-go/.gitignore b/vendor/github.com/graph-gophers/graphql-go/.gitignore new file mode 100644 index 000000000..2fa95abef --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/.gitignore @@ -0,0 +1,5 @@ +/.idea +/.vscode +/internal/validation/testdata/graphql-js +/internal/validation/testdata/node_modules +/vendor diff --git a/vendor/github.com/graph-gophers/graphql-go/.golangci.yml b/vendor/github.com/graph-gophers/graphql-go/.golangci.yml new file mode 100644 index 000000000..c6741d58a --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/.golangci.yml @@ -0,0 +1,35 @@ +run: + timeout: 5m + +linters-settings: + gofmt: + simplify: true + govet: + check-shadowing: true + enable-all: true + disable: + - fieldalignment + - deepequalerrors # remove later + +linters: + disable-all: true + enable: + - deadcode + - gofmt + - gosimple + - govet + - ineffassign + - exportloopref + - structcheck + - staticcheck + - unconvert + - unused + - varcheck + - misspell + - goimports + +issues: + exclude-rules: + - linters: + - unused + path: "graphql_test.go" \ No newline at end of file diff --git a/vendor/github.com/graph-gophers/graphql-go/CHANGELOG.md b/vendor/github.com/graph-gophers/graphql-go/CHANGELOG.md new file mode 100644 index 000000000..e5f48c069 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/CHANGELOG.md @@ -0,0 +1,10 @@ +CHANGELOG + +[v1.1.0](https://github.com/graph-gophers/graphql-go/releases/tag/v1.1.0) Release v1.1.0 +* [FEATURE] Add types package #437 +* [FEATURE] Expose `packer.Unmarshaler` as `decode.Unmarshaler` to the public #450 +* [FEATURE] Add location fields to type definitions #454 +* [FEATURE] `errors.Errorf` preserves original error similar to `fmt.Errorf` #456 +* [BUGFIX] Fix duplicated __typename in response (fixes #369) #443 + +[v1.0.0](https://github.com/graph-gophers/graphql-go/releases/tag/v1.0.0) Initial release diff --git a/vendor/github.com/graph-gophers/graphql-go/CONTRIBUTING.md b/vendor/github.com/graph-gophers/graphql-go/CONTRIBUTING.md new file mode 100644 index 000000000..a2cffca8c --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/CONTRIBUTING.md @@ -0,0 +1,13 @@ +## Contributing + +- With issues: + - Use the search tool before opening a new issue. + - Please provide source code and commit sha if you found a bug. + - Review existing issues and provide feedback or react to them. + +- With pull requests: + - Open your pull request against `master` + - Your pull request should have no more than two commits, if not you should squash them. + - It should pass all tests in the available continuous integrations systems such as TravisCI. + - You should add/modify tests to cover your proposed code changes. + - If your pull request contains a new feature, please document it on the README. 
\ No newline at end of file diff --git a/vendor/github.com/graph-gophers/graphql-go/LICENSE b/vendor/github.com/graph-gophers/graphql-go/LICENSE new file mode 100644 index 000000000..3907cecac --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2016 Richard Musiol. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/graph-gophers/graphql-go/README.md b/vendor/github.com/graph-gophers/graphql-go/README.md new file mode 100644 index 000000000..87b020c13 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/README.md @@ -0,0 +1,169 @@ +# graphql-go [![Sourcegraph](https://sourcegraph.com/github.com/graph-gophers/graphql-go/-/badge.svg)](https://sourcegraph.com/github.com/graph-gophers/graphql-go?badge) [![Build Status](https://graph-gophers.semaphoreci.com/badges/graphql-go/branches/master.svg?style=shields)](https://graph-gophers.semaphoreci.com/projects/graphql-go) [![GoDoc](https://godoc.org/github.com/graph-gophers/graphql-go?status.svg)](https://godoc.org/github.com/graph-gophers/graphql-go) + +

+ +The goal of this project is to provide full support of the [GraphQL draft specification](https://facebook.github.io/graphql/draft) with a set of idiomatic, easy-to-use Go packages. + +While still under heavy development (`internal` APIs are almost certainly subject to change), this library is +safe for production use. + +## Features + +- minimal API +- support for `context.Context` +- support for the `OpenTracing` standard +- schema type-checking against resolvers +- resolvers are matched to the schema based on method sets (can resolve a GraphQL schema with a Go interface or Go struct). +- handles panics in resolvers +- parallel execution of resolvers +- subscriptions + - [sample WS transport](https://github.com/graph-gophers/graphql-transport-ws) + +## Roadmap + +We're trying out the GitHub Project feature to manage `graphql-go`'s [development roadmap](https://github.com/graph-gophers/graphql-go/projects/1). +Feedback is welcome and appreciated. + +## (Some) Documentation + +### Basic Sample + +```go +package main + +import ( + "log" + "net/http" + + graphql "github.com/graph-gophers/graphql-go" + "github.com/graph-gophers/graphql-go/relay" +) + +type query struct{} + +func (_ *query) Hello() string { return "Hello, world!" } + +func main() { + s := ` + type Query { + hello: String! + } + ` + schema := graphql.MustParseSchema(s, &query{}) + http.Handle("/query", &relay.Handler{Schema: schema}) + log.Fatal(http.ListenAndServe(":8080", nil)) +} +``` + +To test: + +```sh +curl -XPOST -d '{"query": "{ hello }"}' localhost:8080/query +``` + +### Resolvers + +A resolver must have one method or field for each field of the GraphQL type it resolves. The method or field name has to be [exported](https://golang.org/ref/spec#Exported_identifiers) and match the schema field's name case-insensitively. +You can use struct fields as resolvers by using `SchemaOpt: UseFieldResolvers()`. For example, +``` +opts := []graphql.SchemaOpt{graphql.UseFieldResolvers()} +schema := graphql.MustParseSchema(s, &query{}, opts...) +``` + +When using the `UseFieldResolvers` schema option, a struct field will be used *only* when: +- there is no method for a struct field +- a struct field does not implement an interface method +- a struct field does not have arguments + +The method has up to two arguments: + +- Optional `context.Context` argument. +- Mandatory `*struct { ... }` argument if the corresponding GraphQL field has arguments. The names of the struct fields have to be [exported](https://golang.org/ref/spec#Exported_identifiers) and have to match the names of the GraphQL arguments case-insensitively. + +The method has up to two results: + +- The GraphQL field's value as determined by the resolver. +- Optional `error` result. + +Example of a simple resolver method: + +```go +func (r *helloWorldResolver) Hello() string { + return "Hello world!" +} +``` + +The following signature is also allowed: + +```go +func (r *helloWorldResolver) Hello(ctx context.Context) (string, error) { + return "Hello world!", nil +} +``` + +### Schema Options + +- `UseStringDescriptions()` enables the use of double-quoted and triple-quoted strings as descriptions. When this is not enabled, comments are parsed as descriptions instead. +- `UseFieldResolvers()` specifies whether to use struct field resolvers. +- `MaxDepth(n int)` specifies the maximum field nesting depth in a query. The default is 0, which disables max depth checking. +- `MaxParallelism(n int)` specifies the maximum number of resolvers per request allowed to run in parallel.
The default is 10. +- `Tracer(tracer trace.Tracer)` is used to trace queries and fields. It defaults to `trace.OpenTracingTracer`. +- `ValidationTracer(tracer trace.ValidationTracer)` is used to trace validation errors. It defaults to `trace.NoopValidationTracer`. +- `Logger(logger log.Logger)` is used to log panics during query execution. It defaults to `exec.DefaultLogger`. +- `PanicHandler(panicHandler errors.PanicHandler)` is used to transform panics into errors during query execution. It defaults to `errors.DefaultPanicHandler`. +- `DisableIntrospection()` disables introspection queries. + +### Custom Errors + +Errors returned by resolvers can include custom extensions by implementing the `ResolverError` interface: + +```go +type ResolverError interface { + error + Extensions() map[string]interface{} +} +``` + +Example of a simple custom error: + +```go +type droidNotFoundError struct { + Code string `json:"code"` + Message string `json:"message"` +} + +func (e droidNotFoundError) Error() string { + return fmt.Sprintf("error [%s]: %s", e.Code, e.Message) +} + +func (e droidNotFoundError) Extensions() map[string]interface{} { + return map[string]interface{}{ + "code": e.Code, + "message": e.Message, + } +} +``` + +This could produce a GraphQL error such as: + +```json +{ + "errors": [ + { + "message": "error [NotFound]: This is not the droid you are looking for", + "path": [ + "droid" + ], + "extensions": { + "code": "NotFound", + "message": "This is not the droid you are looking for" + } + } + ], + "data": null +} +``` + +### [Examples](https://github.com/graph-gophers/graphql-go/wiki/Examples) + +### [Companies that use this library](https://github.com/graph-gophers/graphql-go/wiki/Users) diff --git a/vendor/github.com/graph-gophers/graphql-go/decode/decode.go b/vendor/github.com/graph-gophers/graphql-go/decode/decode.go new file mode 100644 index 000000000..56a9d5b53 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/decode/decode.go @@ -0,0 +1,13 @@ +package decode + +// Unmarshaler defines the API of Go types mapped to custom GraphQL scalar types +type Unmarshaler interface { + // ImplementsGraphQLType maps the implementing custom Go type + // to the GraphQL scalar type in the schema.
+ ImplementsGraphQLType(name string) bool + // UnmarshalGraphQL is the custom unmarshaler for the implementing type + // + // This function will be called whenever you use the + // custom GraphQL scalar type as an input + UnmarshalGraphQL(input interface{}) error +} diff --git a/vendor/github.com/graph-gophers/graphql-go/errors/errors.go b/vendor/github.com/graph-gophers/graphql-go/errors/errors.go new file mode 100644 index 000000000..0f9340b10 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/errors/errors.go @@ -0,0 +1,59 @@ +package errors + +import ( + "fmt" +) + +type QueryError struct { + Err error `json:"-"` // Err holds underlying if available + Message string `json:"message"` + Locations []Location `json:"locations,omitempty"` + Path []interface{} `json:"path,omitempty"` + Rule string `json:"-"` + ResolverError error `json:"-"` + Extensions map[string]interface{} `json:"extensions,omitempty"` +} + +type Location struct { + Line int `json:"line"` + Column int `json:"column"` +} + +func (a Location) Before(b Location) bool { + return a.Line < b.Line || (a.Line == b.Line && a.Column < b.Column) +} + +func Errorf(format string, a ...interface{}) *QueryError { + // similar to fmt.Errorf, Errorf will wrap the last argument if it is an instance of error + var err error + if n := len(a); n > 0 { + if v, ok := a[n-1].(error); ok { + err = v + } + } + + return &QueryError{ + Err: err, + Message: fmt.Sprintf(format, a...), + } +} + +func (err *QueryError) Error() string { + if err == nil { + return "" + } + str := fmt.Sprintf("graphql: %s", err.Message) + for _, loc := range err.Locations { + str += fmt.Sprintf(" (line %d, column %d)", loc.Line, loc.Column) + } + return str +} + +func (err *QueryError) Unwrap() error { + if err == nil { + return nil + } + return err.Err +} + +var _ error = &QueryError{} diff --git a/vendor/github.com/graph-gophers/graphql-go/errors/panic_handler.go b/vendor/github.com/graph-gophers/graphql-go/errors/panic_handler.go new file mode 100644 index 000000000..5446c2a9c --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/errors/panic_handler.go @@ -0,0 +1,18 @@ +package errors + +import ( + "context" +) + +// PanicHandler is the interface used to create custom panic errors that occur during query execution +type PanicHandler interface { + MakePanicError(ctx context.Context, value interface{}) *QueryError +} + +// DefaultPanicHandler is the default PanicHandler +type DefaultPanicHandler struct{} + +// MakePanicError creates a new QueryError from a panic that occurred during execution +func (h *DefaultPanicHandler) MakePanicError(ctx context.Context, value interface{}) *QueryError { + return Errorf("panic occurred: %v", value) +} diff --git a/vendor/github.com/graph-gophers/graphql-go/example/starwars/introspect.json b/vendor/github.com/graph-gophers/graphql-go/example/starwars/introspect.json new file mode 100644 index 000000000..2b955ee9c --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/example/starwars/introspect.json @@ -0,0 +1,2026 @@ +{ + "__schema": { + "directives": [ + { + "args": [ + { + "defaultValue": "\"No longer supported\"", + "description": "Explains why this element was deprecated, usually also including a suggestion\nfor how to access supported similar data. 
Formatted in\n[Markdown](https://daringfireball.net/projects/markdown/).", + "name": "reason", + "type": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + ], + "description": "Marks an element of a GraphQL schema as no longer supported.", + "locations": [ + "FIELD_DEFINITION", + "ENUM_VALUE" + ], + "name": "deprecated" + }, + { + "args": [ + { + "defaultValue": null, + "description": "Included when true.", + "name": "if", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "Boolean", + "ofType": null + } + } + } + ], + "description": "Directs the executor to include this field or fragment only when the `if` argument is true.", + "locations": [ + "FIELD", + "FRAGMENT_SPREAD", + "INLINE_FRAGMENT" + ], + "name": "include" + }, + { + "args": [ + { + "defaultValue": null, + "description": "Skipped when true.", + "name": "if", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "Boolean", + "ofType": null + } + } + } + ], + "description": "Directs the executor to skip this field or fragment when the `if` argument is true.", + "locations": [ + "FIELD", + "FRAGMENT_SPREAD", + "INLINE_FRAGMENT" + ], + "name": "skip" + } + ], + "mutationType": { + "name": "Mutation" + }, + "queryType": { + "name": "Query" + }, + "subscriptionType": null, + "types": [ + { + "description": "The `Boolean` scalar type represents `true` or `false`.", + "enumValues": null, + "fields": null, + "inputFields": null, + "interfaces": null, + "kind": "SCALAR", + "name": "Boolean", + "possibleTypes": null + }, + { + "description": "A character from the Star Wars universe", + "enumValues": null, + "fields": [ + { + "args": [], + "deprecationReason": null, + "description": "The ID of the character", + "isDeprecated": false, + "name": "id", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "ID", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": "The name of the character", + "isDeprecated": false, + "name": "name", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": "The friends of the character, or an empty list if they have none", + "isDeprecated": false, + "name": "friends", + "type": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "INTERFACE", + "name": "Character", + "ofType": null + } + } + }, + { + "args": [ + { + "defaultValue": null, + "description": null, + "name": "first", + "type": { + "kind": "SCALAR", + "name": "Int", + "ofType": null + } + }, + { + "defaultValue": null, + "description": null, + "name": "after", + "type": { + "kind": "SCALAR", + "name": "ID", + "ofType": null + } + } + ], + "deprecationReason": null, + "description": "The friends of the character exposed as a connection with edges", + "isDeprecated": false, + "name": "friendsConnection", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "FriendsConnection", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": "The movies this character appears in", + "isDeprecated": false, + "name": "appearsIn", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "ENUM", + "name": "Episode", + "ofType": null + } + } + } + 
} + } + ], + "inputFields": null, + "interfaces": null, + "kind": "INTERFACE", + "name": "Character", + "possibleTypes": [ + { + "kind": "OBJECT", + "name": "Human", + "ofType": null + }, + { + "kind": "OBJECT", + "name": "Droid", + "ofType": null + } + ] + }, + { + "description": "An autonomous mechanical character in the Star Wars universe", + "enumValues": null, + "fields": [ + { + "args": [], + "deprecationReason": null, + "description": "The ID of the droid", + "isDeprecated": false, + "name": "id", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "ID", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": "What others call this droid", + "isDeprecated": false, + "name": "name", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": "This droid's friends, or an empty list if they have none", + "isDeprecated": false, + "name": "friends", + "type": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "INTERFACE", + "name": "Character", + "ofType": null + } + } + }, + { + "args": [ + { + "defaultValue": null, + "description": null, + "name": "first", + "type": { + "kind": "SCALAR", + "name": "Int", + "ofType": null + } + }, + { + "defaultValue": null, + "description": null, + "name": "after", + "type": { + "kind": "SCALAR", + "name": "ID", + "ofType": null + } + } + ], + "deprecationReason": null, + "description": "The friends of the droid exposed as a connection with edges", + "isDeprecated": false, + "name": "friendsConnection", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "FriendsConnection", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": "The movies this droid appears in", + "isDeprecated": false, + "name": "appearsIn", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "ENUM", + "name": "Episode", + "ofType": null + } + } + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": "This droid's primary function", + "isDeprecated": false, + "name": "primaryFunction", + "type": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + ], + "inputFields": null, + "interfaces": [ + { + "kind": "INTERFACE", + "name": "Character", + "ofType": null + } + ], + "kind": "OBJECT", + "name": "Droid", + "possibleTypes": null + }, + { + "description": "The episodes in the Star Wars trilogy", + "enumValues": [ + { + "deprecationReason": null, + "description": "Star Wars Episode IV: A New Hope, released in 1977.", + "isDeprecated": false, + "name": "NEWHOPE" + }, + { + "deprecationReason": null, + "description": "Star Wars Episode V: The Empire Strikes Back, released in 1980.", + "isDeprecated": false, + "name": "EMPIRE" + }, + { + "deprecationReason": null, + "description": "Star Wars Episode VI: Return of the Jedi, released in 1983.", + "isDeprecated": false, + "name": "JEDI" + } + ], + "fields": null, + "inputFields": null, + "interfaces": null, + "kind": "ENUM", + "name": "Episode", + "possibleTypes": null + }, + { + "description": "The `Float` scalar type represents signed double-precision fractional values as specified by [IEEE 754](http://en.wikipedia.org/wiki/IEEE_floating_point).", + "enumValues": null, + "fields": 
null, + "inputFields": null, + "interfaces": null, + "kind": "SCALAR", + "name": "Float", + "possibleTypes": null + }, + { + "description": "A connection object for a character's friends", + "enumValues": null, + "fields": [ + { + "args": [], + "deprecationReason": null, + "description": "The total number of friends", + "isDeprecated": false, + "name": "totalCount", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "Int", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": "The edges for each of the character's friends.", + "isDeprecated": false, + "name": "edges", + "type": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "FriendsEdge", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": "A list of the friends, as a convenience when edges are not needed.", + "isDeprecated": false, + "name": "friends", + "type": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "INTERFACE", + "name": "Character", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": "Information for paginating this connection", + "isDeprecated": false, + "name": "pageInfo", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "PageInfo", + "ofType": null + } + } + } + ], + "inputFields": null, + "interfaces": [], + "kind": "OBJECT", + "name": "FriendsConnection", + "possibleTypes": null + }, + { + "description": "An edge object for a character's friends", + "enumValues": null, + "fields": [ + { + "args": [], + "deprecationReason": null, + "description": "A cursor used for pagination", + "isDeprecated": false, + "name": "cursor", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "ID", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": "The character represented by this friendship edge", + "isDeprecated": false, + "name": "node", + "type": { + "kind": "INTERFACE", + "name": "Character", + "ofType": null + } + } + ], + "inputFields": null, + "interfaces": [], + "kind": "OBJECT", + "name": "FriendsEdge", + "possibleTypes": null + }, + { + "description": "A humanoid creature from the Star Wars universe", + "enumValues": null, + "fields": [ + { + "args": [], + "deprecationReason": null, + "description": "The ID of the human", + "isDeprecated": false, + "name": "id", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "ID", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": "What this human calls themselves", + "isDeprecated": false, + "name": "name", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + }, + { + "args": [ + { + "defaultValue": "METER", + "description": null, + "name": "unit", + "type": { + "kind": "ENUM", + "name": "LengthUnit", + "ofType": null + } + } + ], + "deprecationReason": null, + "description": "Height in the preferred unit, default is meters", + "isDeprecated": false, + "name": "height", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "Float", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": "Mass in kilograms, or null if unknown", + "isDeprecated": false, + "name": "mass", + "type": { + "kind": "SCALAR", + "name": "Float", 
+ "ofType": null + } + }, + { + "args": [], + "deprecationReason": null, + "description": "This human's friends, or an empty list if they have none", + "isDeprecated": false, + "name": "friends", + "type": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "INTERFACE", + "name": "Character", + "ofType": null + } + } + }, + { + "args": [ + { + "defaultValue": null, + "description": null, + "name": "first", + "type": { + "kind": "SCALAR", + "name": "Int", + "ofType": null + } + }, + { + "defaultValue": null, + "description": null, + "name": "after", + "type": { + "kind": "SCALAR", + "name": "ID", + "ofType": null + } + } + ], + "deprecationReason": null, + "description": "The friends of the human exposed as a connection with edges", + "isDeprecated": false, + "name": "friendsConnection", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "FriendsConnection", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": "The movies this human appears in", + "isDeprecated": false, + "name": "appearsIn", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "ENUM", + "name": "Episode", + "ofType": null + } + } + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": "A list of starships this person has piloted, or an empty list if none", + "isDeprecated": false, + "name": "starships", + "type": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "Starship", + "ofType": null + } + } + } + ], + "inputFields": null, + "interfaces": [ + { + "kind": "INTERFACE", + "name": "Character", + "ofType": null + } + ], + "kind": "OBJECT", + "name": "Human", + "possibleTypes": null + }, + { + "description": "The `ID` scalar type represents a unique identifier, often used to refetch an object or as key for a cache. The ID type appears in a JSON response as a String; however, it is not intended to be human-readable. When expected as an input type, any string (such as `\"4\"`) or integer (such as `4`) input value will be accepted as an ID.", + "enumValues": null, + "fields": null, + "inputFields": null, + "interfaces": null, + "kind": "SCALAR", + "name": "ID", + "possibleTypes": null + }, + { + "description": "The `Int` scalar type represents non-fractional signed whole numeric values. 
Int can represent values between -(2^31) and 2^31 - 1.", + "enumValues": null, + "fields": null, + "inputFields": null, + "interfaces": null, + "kind": "SCALAR", + "name": "Int", + "possibleTypes": null + }, + { + "description": "Units of height", + "enumValues": [ + { + "deprecationReason": null, + "description": "The standard unit around the world", + "isDeprecated": false, + "name": "METER" + }, + { + "deprecationReason": null, + "description": "Primarily used in the United States", + "isDeprecated": false, + "name": "FOOT" + } + ], + "fields": null, + "inputFields": null, + "interfaces": null, + "kind": "ENUM", + "name": "LengthUnit", + "possibleTypes": null + }, + { + "description": "The mutation type, represents all updates we can make to our data", + "enumValues": null, + "fields": [ + { + "args": [ + { + "defaultValue": null, + "description": null, + "name": "episode", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "ENUM", + "name": "Episode", + "ofType": null + } + } + }, + { + "defaultValue": null, + "description": null, + "name": "review", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "INPUT_OBJECT", + "name": "ReviewInput", + "ofType": null + } + } + } + ], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "createReview", + "type": { + "kind": "OBJECT", + "name": "Review", + "ofType": null + } + } + ], + "inputFields": null, + "interfaces": [], + "kind": "OBJECT", + "name": "Mutation", + "possibleTypes": null + }, + { + "description": "Information for paginating this connection", + "enumValues": null, + "fields": [ + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "startCursor", + "type": { + "kind": "SCALAR", + "name": "ID", + "ofType": null + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "endCursor", + "type": { + "kind": "SCALAR", + "name": "ID", + "ofType": null + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "hasNextPage", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "Boolean", + "ofType": null + } + } + } + ], + "inputFields": null, + "interfaces": [], + "kind": "OBJECT", + "name": "PageInfo", + "possibleTypes": null + }, + { + "description": "The query type, represents all of the entry points into our object graph", + "enumValues": null, + "fields": [ + { + "args": [ + { + "defaultValue": "NEWHOPE", + "description": null, + "name": "episode", + "type": { + "kind": "ENUM", + "name": "Episode", + "ofType": null + } + } + ], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "hero", + "type": { + "kind": "INTERFACE", + "name": "Character", + "ofType": null + } + }, + { + "args": [ + { + "defaultValue": null, + "description": null, + "name": "episode", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "ENUM", + "name": "Episode", + "ofType": null + } + } + } + ], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "reviews", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "Review", + "ofType": null + } + } + } + }, + { + "args": [ + { + "defaultValue": null, + "description": null, + "name": "text", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + 
"kind": "SCALAR", + "name": "String", + "ofType": null + } + } + } + ], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "search", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "UNION", + "name": "SearchResult", + "ofType": null + } + } + } + }, + { + "args": [ + { + "defaultValue": null, + "description": null, + "name": "id", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "ID", + "ofType": null + } + } + } + ], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "character", + "type": { + "kind": "INTERFACE", + "name": "Character", + "ofType": null + } + }, + { + "args": [ + { + "defaultValue": null, + "description": null, + "name": "id", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "ID", + "ofType": null + } + } + } + ], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "droid", + "type": { + "kind": "OBJECT", + "name": "Droid", + "ofType": null + } + }, + { + "args": [ + { + "defaultValue": null, + "description": null, + "name": "id", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "ID", + "ofType": null + } + } + } + ], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "human", + "type": { + "kind": "OBJECT", + "name": "Human", + "ofType": null + } + }, + { + "args": [ + { + "defaultValue": null, + "description": null, + "name": "id", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "ID", + "ofType": null + } + } + } + ], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "starship", + "type": { + "kind": "OBJECT", + "name": "Starship", + "ofType": null + } + } + ], + "inputFields": null, + "interfaces": [], + "kind": "OBJECT", + "name": "Query", + "possibleTypes": null + }, + { + "description": "Represents a review for a movie", + "enumValues": null, + "fields": [ + { + "args": [], + "deprecationReason": null, + "description": "The number of stars this review gave, 1-5", + "isDeprecated": false, + "name": "stars", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "Int", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": "Comment about the movie", + "isDeprecated": false, + "name": "commentary", + "type": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + ], + "inputFields": null, + "interfaces": [], + "kind": "OBJECT", + "name": "Review", + "possibleTypes": null + }, + { + "description": "The input object sent when someone is creating a new review", + "enumValues": null, + "fields": null, + "inputFields": [ + { + "defaultValue": null, + "description": "0-5 stars", + "name": "stars", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "Int", + "ofType": null + } + } + }, + { + "defaultValue": null, + "description": "Comment about the movie, optional", + "name": "commentary", + "type": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + ], + "interfaces": null, + "kind": "INPUT_OBJECT", + "name": "ReviewInput", + "possibleTypes": null + }, + { + "description": null, + "enumValues": null, + "fields": null, + "inputFields": null, + "interfaces": null, + "kind": "UNION", + "name": 
"SearchResult", + "possibleTypes": [ + { + "kind": "OBJECT", + "name": "Human", + "ofType": null + }, + { + "kind": "OBJECT", + "name": "Droid", + "ofType": null + }, + { + "kind": "OBJECT", + "name": "Starship", + "ofType": null + } + ] + }, + { + "description": null, + "enumValues": null, + "fields": [ + { + "args": [], + "deprecationReason": null, + "description": "The ID of the starship", + "isDeprecated": false, + "name": "id", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "ID", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": "The name of the starship", + "isDeprecated": false, + "name": "name", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + }, + { + "args": [ + { + "defaultValue": "METER", + "description": null, + "name": "unit", + "type": { + "kind": "ENUM", + "name": "LengthUnit", + "ofType": null + } + } + ], + "deprecationReason": null, + "description": "Length of the starship, along the longest axis", + "isDeprecated": false, + "name": "length", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "Float", + "ofType": null + } + } + } + ], + "inputFields": null, + "interfaces": [], + "kind": "OBJECT", + "name": "Starship", + "possibleTypes": null + }, + { + "description": "The `String` scalar type represents textual data, represented as UTF-8 character sequences. The String type is most often used by GraphQL to represent free-form human-readable text.", + "enumValues": null, + "fields": null, + "inputFields": null, + "interfaces": null, + "kind": "SCALAR", + "name": "String", + "possibleTypes": null + }, + { + "description": "A Directive provides a way to describe alternate runtime execution and type validation behavior in a GraphQL document.\n\nIn some cases, you need to provide options to alter GraphQL's execution behavior\nin ways field arguments will not suffice, such as conditionally including or\nskipping a field. 
Directives provide this by describing additional information\nto the executor.", + "enumValues": null, + "fields": [ + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "name", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "description", + "type": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "locations", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "ENUM", + "name": "__DirectiveLocation", + "ofType": null + } + } + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "args", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "__InputValue", + "ofType": null + } + } + } + } + } + ], + "inputFields": null, + "interfaces": [], + "kind": "OBJECT", + "name": "__Directive", + "possibleTypes": null + }, + { + "description": "A Directive can be adjacent to many parts of the GraphQL language, a\n__DirectiveLocation describes one such possible adjacencies.", + "enumValues": [ + { + "deprecationReason": null, + "description": "Location adjacent to a query operation.", + "isDeprecated": false, + "name": "QUERY" + }, + { + "deprecationReason": null, + "description": "Location adjacent to a mutation operation.", + "isDeprecated": false, + "name": "MUTATION" + }, + { + "deprecationReason": null, + "description": "Location adjacent to a subscription operation.", + "isDeprecated": false, + "name": "SUBSCRIPTION" + }, + { + "deprecationReason": null, + "description": "Location adjacent to a field.", + "isDeprecated": false, + "name": "FIELD" + }, + { + "deprecationReason": null, + "description": "Location adjacent to a fragment definition.", + "isDeprecated": false, + "name": "FRAGMENT_DEFINITION" + }, + { + "deprecationReason": null, + "description": "Location adjacent to a fragment spread.", + "isDeprecated": false, + "name": "FRAGMENT_SPREAD" + }, + { + "deprecationReason": null, + "description": "Location adjacent to an inline fragment.", + "isDeprecated": false, + "name": "INLINE_FRAGMENT" + }, + { + "deprecationReason": null, + "description": "Location adjacent to a schema definition.", + "isDeprecated": false, + "name": "SCHEMA" + }, + { + "deprecationReason": null, + "description": "Location adjacent to a scalar definition.", + "isDeprecated": false, + "name": "SCALAR" + }, + { + "deprecationReason": null, + "description": "Location adjacent to an object type definition.", + "isDeprecated": false, + "name": "OBJECT" + }, + { + "deprecationReason": null, + "description": "Location adjacent to a field definition.", + "isDeprecated": false, + "name": "FIELD_DEFINITION" + }, + { + "deprecationReason": null, + "description": "Location adjacent to an argument definition.", + "isDeprecated": false, + "name": "ARGUMENT_DEFINITION" + }, + { + "deprecationReason": null, + "description": "Location adjacent to an interface definition.", + "isDeprecated": false, + "name": "INTERFACE" + }, + { + "deprecationReason": 
null, + "description": "Location adjacent to a union definition.", + "isDeprecated": false, + "name": "UNION" + }, + { + "deprecationReason": null, + "description": "Location adjacent to an enum definition.", + "isDeprecated": false, + "name": "ENUM" + }, + { + "deprecationReason": null, + "description": "Location adjacent to an enum value definition.", + "isDeprecated": false, + "name": "ENUM_VALUE" + }, + { + "deprecationReason": null, + "description": "Location adjacent to an input object type definition.", + "isDeprecated": false, + "name": "INPUT_OBJECT" + }, + { + "deprecationReason": null, + "description": "Location adjacent to an input object field definition.", + "isDeprecated": false, + "name": "INPUT_FIELD_DEFINITION" + } + ], + "fields": null, + "inputFields": null, + "interfaces": null, + "kind": "ENUM", + "name": "__DirectiveLocation", + "possibleTypes": null + }, + { + "description": "One possible value for a given Enum. Enum values are unique values, not a\nplaceholder for a string or numeric value. However an Enum value is returned in\na JSON response as a string.", + "enumValues": null, + "fields": [ + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "name", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "description", + "type": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "isDeprecated", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "Boolean", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "deprecationReason", + "type": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + ], + "inputFields": null, + "interfaces": [], + "kind": "OBJECT", + "name": "__EnumValue", + "possibleTypes": null + }, + { + "description": "Object and Interface types are described by a list of Fields, each of which has\na name, potentially a list of arguments, and a return type.", + "enumValues": null, + "fields": [ + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "name", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "description", + "type": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "args", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "__InputValue", + "ofType": null + } + } + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "type", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "__Type", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "isDeprecated", + "type": { + 
"kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "Boolean", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "deprecationReason", + "type": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + ], + "inputFields": null, + "interfaces": [], + "kind": "OBJECT", + "name": "__Field", + "possibleTypes": null + }, + { + "description": "Arguments provided to Fields or Directives and the input fields of an\nInputObject are represented as Input Values which describe their type and\noptionally a default value.", + "enumValues": null, + "fields": [ + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "name", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "description", + "type": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "type", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "__Type", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": "A GraphQL-formatted string representing the default value for this input value.", + "isDeprecated": false, + "name": "defaultValue", + "type": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + } + ], + "inputFields": null, + "interfaces": [], + "kind": "OBJECT", + "name": "__InputValue", + "possibleTypes": null + }, + { + "description": "A GraphQL Schema defines the capabilities of a GraphQL server. 
It exposes all\navailable types and directives on the server, as well as the entry points for\nquery, mutation, and subscription operations.", + "enumValues": null, + "fields": [ + { + "args": [], + "deprecationReason": null, + "description": "A list of all types supported by this server.", + "isDeprecated": false, + "name": "types", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "__Type", + "ofType": null + } + } + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": "The type that query operations will be rooted at.", + "isDeprecated": false, + "name": "queryType", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "__Type", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": "If this server supports mutation, the type that mutation operations will be rooted at.", + "isDeprecated": false, + "name": "mutationType", + "type": { + "kind": "OBJECT", + "name": "__Type", + "ofType": null + } + }, + { + "args": [], + "deprecationReason": null, + "description": "If this server support subscription, the type that subscription operations will be rooted at.", + "isDeprecated": false, + "name": "subscriptionType", + "type": { + "kind": "OBJECT", + "name": "__Type", + "ofType": null + } + }, + { + "args": [], + "deprecationReason": null, + "description": "A list of all directives supported by this server.", + "isDeprecated": false, + "name": "directives", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "__Directive", + "ofType": null + } + } + } + } + } + ], + "inputFields": null, + "interfaces": [], + "kind": "OBJECT", + "name": "__Schema", + "possibleTypes": null + }, + { + "description": "The fundamental unit of any GraphQL Schema is the type. There are many kinds of\ntypes in GraphQL as represented by the `__TypeKind` enum.\n\nDepending on the kind of a type, certain fields describe information about that\ntype. Scalar types provide no information beyond a name and description, while\nEnum types provide their values. Object and Interface types provide the fields\nthey describe. Abstract types, Union and Interface, provide the Object types\npossible at runtime. 
List and NonNull types compose other types.", + "enumValues": null, + "fields": [ + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "kind", + "type": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "ENUM", + "name": "__TypeKind", + "ofType": null + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "name", + "type": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "description", + "type": { + "kind": "SCALAR", + "name": "String", + "ofType": null + } + }, + { + "args": [ + { + "defaultValue": "false", + "description": null, + "name": "includeDeprecated", + "type": { + "kind": "SCALAR", + "name": "Boolean", + "ofType": null + } + } + ], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "fields", + "type": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "__Field", + "ofType": null + } + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "interfaces", + "type": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "__Type", + "ofType": null + } + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "possibleTypes", + "type": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "__Type", + "ofType": null + } + } + } + }, + { + "args": [ + { + "defaultValue": "false", + "description": null, + "name": "includeDeprecated", + "type": { + "kind": "SCALAR", + "name": "Boolean", + "ofType": null + } + } + ], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "enumValues", + "type": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "__EnumValue", + "ofType": null + } + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "inputFields", + "type": { + "kind": "LIST", + "name": null, + "ofType": { + "kind": "NON_NULL", + "name": null, + "ofType": { + "kind": "OBJECT", + "name": "__InputValue", + "ofType": null + } + } + } + }, + { + "args": [], + "deprecationReason": null, + "description": null, + "isDeprecated": false, + "name": "ofType", + "type": { + "kind": "OBJECT", + "name": "__Type", + "ofType": null + } + } + ], + "inputFields": null, + "interfaces": [], + "kind": "OBJECT", + "name": "__Type", + "possibleTypes": null + }, + { + "description": "An enum describing what kind of type a given `__Type` is.", + "enumValues": [ + { + "deprecationReason": null, + "description": "Indicates this type is a scalar.", + "isDeprecated": false, + "name": "SCALAR" + }, + { + "deprecationReason": null, + "description": "Indicates this type is an object. `fields` and `interfaces` are valid fields.", + "isDeprecated": false, + "name": "OBJECT" + }, + { + "deprecationReason": null, + "description": "Indicates this type is an interface. 
`fields` and `possibleTypes` are valid fields.", + "isDeprecated": false, + "name": "INTERFACE" + }, + { + "deprecationReason": null, + "description": "Indicates this type is a union. `possibleTypes` is a valid field.", + "isDeprecated": false, + "name": "UNION" + }, + { + "deprecationReason": null, + "description": "Indicates this type is an enum. `enumValues` is a valid field.", + "isDeprecated": false, + "name": "ENUM" + }, + { + "deprecationReason": null, + "description": "Indicates this type is an input object. `inputFields` is a valid field.", + "isDeprecated": false, + "name": "INPUT_OBJECT" + }, + { + "deprecationReason": null, + "description": "Indicates this type is a list. `ofType` is a valid field.", + "isDeprecated": false, + "name": "LIST" + }, + { + "deprecationReason": null, + "description": "Indicates this type is a non-null. `ofType` is a valid field.", + "isDeprecated": false, + "name": "NON_NULL" + } + ], + "fields": null, + "inputFields": null, + "interfaces": null, + "kind": "ENUM", + "name": "__TypeKind", + "possibleTypes": null + } + ] + } +} \ No newline at end of file diff --git a/vendor/github.com/graph-gophers/graphql-go/example/starwars/starwars.go b/vendor/github.com/graph-gophers/graphql-go/example/starwars/starwars.go new file mode 100644 index 000000000..07cbb9f40 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/example/starwars/starwars.go @@ -0,0 +1,647 @@ +// Package starwars provides a example schema and resolver based on Star Wars characters. +// +// Source: https://github.com/graphql/graphql.github.io/blob/source/site/_core/swapiSchema.js +package starwars + +import ( + "encoding/base64" + "fmt" + "strconv" + "strings" + + graphql "github.com/graph-gophers/graphql-go" +) + +var Schema = ` + schema { + query: Query + mutation: Mutation + } + # The query type, represents all of the entry points into our object graph + type Query { + hero(episode: Episode = NEWHOPE): Character + reviews(episode: Episode!): [Review]! + search(text: String!): [SearchResult]! + character(id: ID!): Character + droid(id: ID!): Droid + human(id: ID!): Human + starship(id: ID!): Starship + } + # The mutation type, represents all updates we can make to our data + type Mutation { + createReview(episode: Episode!, review: ReviewInput!): Review + } + # The episodes in the Star Wars trilogy + enum Episode { + # Star Wars Episode IV: A New Hope, released in 1977. + NEWHOPE + # Star Wars Episode V: The Empire Strikes Back, released in 1980. + EMPIRE + # Star Wars Episode VI: Return of the Jedi, released in 1983. + JEDI + } + # A character from the Star Wars universe + interface Character { + # The ID of the character + id: ID! + # The name of the character + name: String! + # The friends of the character, or an empty list if they have none + friends: [Character] + # The friends of the character exposed as a connection with edges + friendsConnection(first: Int, after: ID): FriendsConnection! + # The movies this character appears in + appearsIn: [Episode!]! + } + # Units of height + enum LengthUnit { + # The standard unit around the world + METER + # Primarily used in the United States + FOOT + } + # A humanoid creature from the Star Wars universe + type Human implements Character { + # The ID of the human + id: ID! + # What this human calls themselves + name: String! + # Height in the preferred unit, default is meters + height(unit: LengthUnit = METER): Float! 
+ # Mass in kilograms, or null if unknown + mass: Float + # This human's friends, or an empty list if they have none + friends: [Character] + # The friends of the human exposed as a connection with edges + friendsConnection(first: Int, after: ID): FriendsConnection! + # The movies this human appears in + appearsIn: [Episode!]! + # A list of starships this person has piloted, or an empty list if none + starships: [Starship] + } + # An autonomous mechanical character in the Star Wars universe + type Droid implements Character { + # The ID of the droid + id: ID! + # What others call this droid + name: String! + # This droid's friends, or an empty list if they have none + friends: [Character] + # The friends of the droid exposed as a connection with edges + friendsConnection(first: Int, after: ID): FriendsConnection! + # The movies this droid appears in + appearsIn: [Episode!]! + # This droid's primary function + primaryFunction: String + } + # A connection object for a character's friends + type FriendsConnection { + # The total number of friends + totalCount: Int! + # The edges for each of the character's friends. + edges: [FriendsEdge] + # A list of the friends, as a convenience when edges are not needed. + friends: [Character] + # Information for paginating this connection + pageInfo: PageInfo! + } + # An edge object for a character's friends + type FriendsEdge { + # A cursor used for pagination + cursor: ID! + # The character represented by this friendship edge + node: Character + } + # Information for paginating this connection + type PageInfo { + startCursor: ID + endCursor: ID + hasNextPage: Boolean! + } + # Represents a review for a movie + type Review { + # The number of stars this review gave, 1-5 + stars: Int! + # Comment about the movie + commentary: String + } + # The input object sent when someone is creating a new review + input ReviewInput { + # 0-5 stars + stars: Int! + # Comment about the movie, optional + commentary: String + } + type Starship { + # The ID of the starship + id: ID! + # The name of the starship + name: String! + # Length of the starship, along the longest axis + length(unit: LengthUnit = METER): Float! 
+ } + union SearchResult = Human | Droid | Starship +` + +type human struct { + ID graphql.ID + Name string + Friends []graphql.ID + AppearsIn []string + Height float64 + Mass int + Starships []graphql.ID +} + +var humans = []*human{ + { + ID: "1000", + Name: "Luke Skywalker", + Friends: []graphql.ID{"1002", "1003", "2000", "2001"}, + AppearsIn: []string{"NEWHOPE", "EMPIRE", "JEDI"}, + Height: 1.72, + Mass: 77, + Starships: []graphql.ID{"3001", "3003"}, + }, + { + ID: "1001", + Name: "Darth Vader", + Friends: []graphql.ID{"1004"}, + AppearsIn: []string{"NEWHOPE", "EMPIRE", "JEDI"}, + Height: 2.02, + Mass: 136, + Starships: []graphql.ID{"3002"}, + }, + { + ID: "1002", + Name: "Han Solo", + Friends: []graphql.ID{"1000", "1003", "2001"}, + AppearsIn: []string{"NEWHOPE", "EMPIRE", "JEDI"}, + Height: 1.8, + Mass: 80, + Starships: []graphql.ID{"3000", "3003"}, + }, + { + ID: "1003", + Name: "Leia Organa", + Friends: []graphql.ID{"1000", "1002", "2000", "2001"}, + AppearsIn: []string{"NEWHOPE", "EMPIRE", "JEDI"}, + Height: 1.5, + Mass: 49, + }, + { + ID: "1004", + Name: "Wilhuff Tarkin", + Friends: []graphql.ID{"1001"}, + AppearsIn: []string{"NEWHOPE"}, + Height: 1.8, + Mass: 0, + }, +} + +var humanData = make(map[graphql.ID]*human) + +func init() { + for _, h := range humans { + humanData[h.ID] = h + } +} + +type droid struct { + ID graphql.ID + Name string + Friends []graphql.ID + AppearsIn []string + PrimaryFunction string +} + +var droids = []*droid{ + { + ID: "2000", + Name: "C-3PO", + Friends: []graphql.ID{"1000", "1002", "1003", "2001"}, + AppearsIn: []string{"NEWHOPE", "EMPIRE", "JEDI"}, + PrimaryFunction: "Protocol", + }, + { + ID: "2001", + Name: "R2-D2", + Friends: []graphql.ID{"1000", "1002", "1003"}, + AppearsIn: []string{"NEWHOPE", "EMPIRE", "JEDI"}, + PrimaryFunction: "Astromech", + }, +} + +var droidData = make(map[graphql.ID]*droid) + +func init() { + for _, d := range droids { + droidData[d.ID] = d + } +} + +type starship struct { + ID graphql.ID + Name string + Length float64 +} + +var starships = []*starship{ + { + ID: "3000", + Name: "Millennium Falcon", + Length: 34.37, + }, + { + ID: "3001", + Name: "X-Wing", + Length: 12.5, + }, + { + ID: "3002", + Name: "TIE Advanced x1", + Length: 9.2, + }, + { + ID: "3003", + Name: "Imperial shuttle", + Length: 20, + }, +} + +var starshipData = make(map[graphql.ID]*starship) + +func init() { + for _, s := range starships { + starshipData[s.ID] = s + } +} + +type review struct { + stars int32 + commentary *string +} + +var reviews = make(map[string][]*review) + +type Resolver struct{} + +func (r *Resolver) Hero(args struct{ Episode string }) *characterResolver { + if args.Episode == "EMPIRE" { + return &characterResolver{&humanResolver{humanData["1000"]}} + } + return &characterResolver{&droidResolver{droidData["2001"]}} +} + +func (r *Resolver) Reviews(args struct{ Episode string }) []*reviewResolver { + var l []*reviewResolver + for _, review := range reviews[args.Episode] { + l = append(l, &reviewResolver{review}) + } + return l +} + +func (r *Resolver) Search(args struct{ Text string }) []*searchResultResolver { + var l []*searchResultResolver + for _, h := range humans { + if strings.Contains(h.Name, args.Text) { + l = append(l, &searchResultResolver{&humanResolver{h}}) + } + } + for _, d := range droids { + if strings.Contains(d.Name, args.Text) { + l = append(l, &searchResultResolver{&droidResolver{d}}) + } + } + for _, s := range starships { + if strings.Contains(s.Name, args.Text) { + l = append(l, 
&searchResultResolver{&starshipResolver{s}}) + } + } + return l +} + +func (r *Resolver) Character(args struct{ ID graphql.ID }) *characterResolver { + if h := humanData[args.ID]; h != nil { + return &characterResolver{&humanResolver{h}} + } + if d := droidData[args.ID]; d != nil { + return &characterResolver{&droidResolver{d}} + } + return nil +} + +func (r *Resolver) Human(args struct{ ID graphql.ID }) *humanResolver { + if h := humanData[args.ID]; h != nil { + return &humanResolver{h} + } + return nil +} + +func (r *Resolver) Droid(args struct{ ID graphql.ID }) *droidResolver { + if d := droidData[args.ID]; d != nil { + return &droidResolver{d} + } + return nil +} + +func (r *Resolver) Starship(args struct{ ID graphql.ID }) *starshipResolver { + if s := starshipData[args.ID]; s != nil { + return &starshipResolver{s} + } + return nil +} + +func (r *Resolver) CreateReview(args *struct { + Episode string + Review *reviewInput +}) *reviewResolver { + review := &review{ + stars: args.Review.Stars, + commentary: args.Review.Commentary, + } + reviews[args.Episode] = append(reviews[args.Episode], review) + return &reviewResolver{review} +} + +type friendsConnectionArgs struct { + First *int32 + After *graphql.ID +} + +type character interface { + ID() graphql.ID + Name() string + Friends() *[]*characterResolver + FriendsConnection(friendsConnectionArgs) (*friendsConnectionResolver, error) + AppearsIn() []string +} + +type characterResolver struct { + character +} + +func (r *characterResolver) ToHuman() (*humanResolver, bool) { + c, ok := r.character.(*humanResolver) + return c, ok +} + +func (r *characterResolver) ToDroid() (*droidResolver, bool) { + c, ok := r.character.(*droidResolver) + return c, ok +} + +type humanResolver struct { + h *human +} + +func (r *humanResolver) ID() graphql.ID { + return r.h.ID +} + +func (r *humanResolver) Name() string { + return r.h.Name +} + +func (r *humanResolver) Height(args struct{ Unit string }) float64 { + return convertLength(r.h.Height, args.Unit) +} + +func (r *humanResolver) Mass() *float64 { + if r.h.Mass == 0 { + return nil + } + f := float64(r.h.Mass) + return &f +} + +func (r *humanResolver) Friends() *[]*characterResolver { + return resolveCharacters(r.h.Friends) +} + +func (r *humanResolver) FriendsConnection(args friendsConnectionArgs) (*friendsConnectionResolver, error) { + return newFriendsConnectionResolver(r.h.Friends, args) +} + +func (r *humanResolver) AppearsIn() []string { + return r.h.AppearsIn +} + +func (r *humanResolver) Starships() *[]*starshipResolver { + l := make([]*starshipResolver, len(r.h.Starships)) + for i, id := range r.h.Starships { + l[i] = &starshipResolver{starshipData[id]} + } + return &l +} + +type droidResolver struct { + d *droid +} + +func (r *droidResolver) ID() graphql.ID { + return r.d.ID +} + +func (r *droidResolver) Name() string { + return r.d.Name +} + +func (r *droidResolver) Friends() *[]*characterResolver { + return resolveCharacters(r.d.Friends) +} + +func (r *droidResolver) FriendsConnection(args friendsConnectionArgs) (*friendsConnectionResolver, error) { + return newFriendsConnectionResolver(r.d.Friends, args) +} + +func (r *droidResolver) AppearsIn() []string { + return r.d.AppearsIn +} + +func (r *droidResolver) PrimaryFunction() *string { + if r.d.PrimaryFunction == "" { + return nil + } + return &r.d.PrimaryFunction +} + +type starshipResolver struct { + s *starship +} + +func (r *starshipResolver) ID() graphql.ID { + return r.s.ID +} + +func (r *starshipResolver) Name() string { + return 
r.s.Name +} + +func (r *starshipResolver) Length(args struct{ Unit string }) float64 { + return convertLength(r.s.Length, args.Unit) +} + +type searchResultResolver struct { + result interface{} +} + +func (r *searchResultResolver) ToHuman() (*humanResolver, bool) { + res, ok := r.result.(*humanResolver) + return res, ok +} + +func (r *searchResultResolver) ToDroid() (*droidResolver, bool) { + res, ok := r.result.(*droidResolver) + return res, ok +} + +func (r *searchResultResolver) ToStarship() (*starshipResolver, bool) { + res, ok := r.result.(*starshipResolver) + return res, ok +} + +func convertLength(meters float64, unit string) float64 { + switch unit { + case "METER": + return meters + case "FOOT": + return meters * 3.28084 + default: + panic("invalid unit") + } +} + +func resolveCharacters(ids []graphql.ID) *[]*characterResolver { + var characters []*characterResolver + for _, id := range ids { + if c := resolveCharacter(id); c != nil { + characters = append(characters, c) + } + } + return &characters +} + +func resolveCharacter(id graphql.ID) *characterResolver { + if h, ok := humanData[id]; ok { + return &characterResolver{&humanResolver{h}} + } + if d, ok := droidData[id]; ok { + return &characterResolver{&droidResolver{d}} + } + return nil +} + +type reviewResolver struct { + r *review +} + +func (r *reviewResolver) Stars() int32 { + return r.r.stars +} + +func (r *reviewResolver) Commentary() *string { + return r.r.commentary +} + +type friendsConnectionResolver struct { + ids []graphql.ID + from int + to int +} + +func newFriendsConnectionResolver(ids []graphql.ID, args friendsConnectionArgs) (*friendsConnectionResolver, error) { + from := 0 + if args.After != nil { + b, err := base64.StdEncoding.DecodeString(string(*args.After)) + if err != nil { + return nil, err + } + i, err := strconv.Atoi(strings.TrimPrefix(string(b), "cursor")) + if err != nil { + return nil, err + } + from = i + } + + to := len(ids) + if args.First != nil { + to = from + int(*args.First) + if to > len(ids) { + to = len(ids) + } + } + + return &friendsConnectionResolver{ + ids: ids, + from: from, + to: to, + }, nil +} + +func (r *friendsConnectionResolver) TotalCount() int32 { + return int32(len(r.ids)) +} + +func (r *friendsConnectionResolver) Edges() *[]*friendsEdgeResolver { + l := make([]*friendsEdgeResolver, r.to-r.from) + for i := range l { + l[i] = &friendsEdgeResolver{ + cursor: encodeCursor(r.from + i), + id: r.ids[r.from+i], + } + } + return &l +} + +func (r *friendsConnectionResolver) Friends() *[]*characterResolver { + return resolveCharacters(r.ids[r.from:r.to]) +} + +func (r *friendsConnectionResolver) PageInfo() *pageInfoResolver { + return &pageInfoResolver{ + startCursor: encodeCursor(r.from), + endCursor: encodeCursor(r.to - 1), + hasNextPage: r.to < len(r.ids), + } +} + +func encodeCursor(i int) graphql.ID { + return graphql.ID(base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("cursor%d", i+1)))) +} + +type friendsEdgeResolver struct { + cursor graphql.ID + id graphql.ID +} + +func (r *friendsEdgeResolver) Cursor() graphql.ID { + return r.cursor +} + +func (r *friendsEdgeResolver) Node() *characterResolver { + return resolveCharacter(r.id) +} + +type pageInfoResolver struct { + startCursor graphql.ID + endCursor graphql.ID + hasNextPage bool +} + +func (r *pageInfoResolver) StartCursor() *graphql.ID { + return &r.startCursor +} + +func (r *pageInfoResolver) EndCursor() *graphql.ID { + return &r.endCursor +} + +func (r *pageInfoResolver) HasNextPage() bool { + return r.hasNextPage 
+} + +type reviewInput struct { + Stars int32 + Commentary *string +} diff --git a/vendor/github.com/graph-gophers/graphql-go/go.mod b/vendor/github.com/graph-gophers/graphql-go/go.mod new file mode 100644 index 000000000..2c814b0be --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/go.mod @@ -0,0 +1,5 @@ +module github.com/graph-gophers/graphql-go + +require github.com/opentracing/opentracing-go v1.1.0 + +go 1.13 diff --git a/vendor/github.com/graph-gophers/graphql-go/go.sum b/vendor/github.com/graph-gophers/graphql-go/go.sum new file mode 100644 index 000000000..71fd021b0 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/go.sum @@ -0,0 +1,2 @@ +github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= diff --git a/vendor/github.com/graph-gophers/graphql-go/graphql.go b/vendor/github.com/graph-gophers/graphql-go/graphql.go new file mode 100644 index 000000000..76a6434d5 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/graphql.go @@ -0,0 +1,339 @@ +package graphql + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/graph-gophers/graphql-go/errors" + "github.com/graph-gophers/graphql-go/internal/common" + "github.com/graph-gophers/graphql-go/internal/exec" + "github.com/graph-gophers/graphql-go/internal/exec/resolvable" + "github.com/graph-gophers/graphql-go/internal/exec/selected" + "github.com/graph-gophers/graphql-go/internal/query" + "github.com/graph-gophers/graphql-go/internal/schema" + "github.com/graph-gophers/graphql-go/internal/validation" + "github.com/graph-gophers/graphql-go/introspection" + "github.com/graph-gophers/graphql-go/log" + "github.com/graph-gophers/graphql-go/trace" + "github.com/graph-gophers/graphql-go/types" +) + +// ParseSchema parses a GraphQL schema and attaches the given root resolver. It returns an error if +// the Go type signature of the resolvers does not match the schema. If nil is passed as the +// resolver, then the schema can not be executed, but it may be inspected (e.g. with ToJSON). +func ParseSchema(schemaString string, resolver interface{}, opts ...SchemaOpt) (*Schema, error) { + s := &Schema{ + schema: schema.New(), + maxParallelism: 10, + tracer: trace.OpenTracingTracer{}, + logger: &log.DefaultLogger{}, + panicHandler: &errors.DefaultPanicHandler{}, + } + for _, opt := range opts { + opt(s) + } + + if s.validationTracer == nil { + if tracer, ok := s.tracer.(trace.ValidationTracerContext); ok { + s.validationTracer = tracer + } else { + s.validationTracer = &validationBridgingTracer{tracer: trace.NoopValidationTracer{}} + } + } + + if err := schema.Parse(s.schema, schemaString, s.useStringDescriptions); err != nil { + return nil, err + } + if err := s.validateSchema(); err != nil { + return nil, err + } + + r, err := resolvable.ApplyResolver(s.schema, resolver) + if err != nil { + return nil, err + } + s.res = r + + return s, nil +} + +// MustParseSchema calls ParseSchema and panics on error. +func MustParseSchema(schemaString string, resolver interface{}, opts ...SchemaOpt) *Schema { + s, err := ParseSchema(schemaString, resolver, opts...) + if err != nil { + panic(err) + } + return s +} + +// Schema represents a GraphQL schema with an optional resolver. 
+type Schema struct { + schema *types.Schema + res *resolvable.Schema + + maxDepth int + maxParallelism int + tracer trace.Tracer + validationTracer trace.ValidationTracerContext + logger log.Logger + panicHandler errors.PanicHandler + useStringDescriptions bool + disableIntrospection bool + subscribeResolverTimeout time.Duration +} + +func (s *Schema) ASTSchema() *types.Schema { + return s.schema +} + +// SchemaOpt is an option to pass to ParseSchema or MustParseSchema. +type SchemaOpt func(*Schema) + +// UseStringDescriptions enables the usage of double quoted and triple quoted +// strings as descriptions as per the June 2018 spec +// https://facebook.github.io/graphql/June2018/. When this is not enabled, +// comments are parsed as descriptions instead. +func UseStringDescriptions() SchemaOpt { + return func(s *Schema) { + s.useStringDescriptions = true + } +} + +// UseFieldResolvers specifies whether to use struct field resolvers +func UseFieldResolvers() SchemaOpt { + return func(s *Schema) { + s.schema.UseFieldResolvers = true + } +} + +// MaxDepth specifies the maximum field nesting depth in a query. The default is 0 which disables max depth checking. +func MaxDepth(n int) SchemaOpt { + return func(s *Schema) { + s.maxDepth = n + } +} + +// MaxParallelism specifies the maximum number of resolvers per request allowed to run in parallel. The default is 10. +func MaxParallelism(n int) SchemaOpt { + return func(s *Schema) { + s.maxParallelism = n + } +} + +// Tracer is used to trace queries and fields. It defaults to trace.OpenTracingTracer. +func Tracer(tracer trace.Tracer) SchemaOpt { + return func(s *Schema) { + s.tracer = tracer + } +} + +// ValidationTracer is used to trace validation errors. It defaults to trace.NoopValidationTracer. +// Deprecated: context is needed to support tracing correctly. Use a Tracer which implements trace.ValidationTracerContext. +func ValidationTracer(tracer trace.ValidationTracer) SchemaOpt { //nolint:staticcheck + return func(s *Schema) { + s.validationTracer = &validationBridgingTracer{tracer: tracer} + } +} + +// Logger is used to log panics during query execution. It defaults to exec.DefaultLogger. +func Logger(logger log.Logger) SchemaOpt { + return func(s *Schema) { + s.logger = logger + } +} + +// PanicHandler is used to customize the panic errors during query execution. +// It defaults to errors.DefaultPanicHandler. +func PanicHandler(panicHandler errors.PanicHandler) SchemaOpt { + return func(s *Schema) { + s.panicHandler = panicHandler + } +} + +// DisableIntrospection disables introspection queries. +func DisableIntrospection() SchemaOpt { + return func(s *Schema) { + s.disableIntrospection = true + } +} + +// SubscribeResolverTimeout is an option to control the amount of time +// we allow for a single subscribe message resolver to complete it's job +// before it times out and returns an error to the subscriber. +func SubscribeResolverTimeout(timeout time.Duration) SchemaOpt { + return func(s *Schema) { + s.subscribeResolverTimeout = timeout + } +} + +// Response represents a typical response of a GraphQL server. It may be encoded to JSON directly or +// it may be further processed to a custom response type, for example to include custom error data. 
+// Errors are intentionally serialized first based on the advice in https://github.com/facebook/graphql/commit/7b40390d48680b15cb93e02d46ac5eb249689876#diff-757cea6edf0288677a9eea4cfc801d87R107 +type Response struct { + Errors []*errors.QueryError `json:"errors,omitempty"` + Data json.RawMessage `json:"data,omitempty"` + Extensions map[string]interface{} `json:"extensions,omitempty"` +} + +// Validate validates the given query with the schema. +func (s *Schema) Validate(queryString string) []*errors.QueryError { + return s.ValidateWithVariables(queryString, nil) +} + +// ValidateWithVariables validates the given query with the schema and the input variables. +func (s *Schema) ValidateWithVariables(queryString string, variables map[string]interface{}) []*errors.QueryError { + doc, qErr := query.Parse(queryString) + if qErr != nil { + return []*errors.QueryError{qErr} + } + + return validation.Validate(s.schema, doc, variables, s.maxDepth) +} + +// Exec executes the given query with the schema's resolver. It panics if the schema was created +// without a resolver. If the context get cancelled, no further resolvers will be called and a +// the context error will be returned as soon as possible (not immediately). +func (s *Schema) Exec(ctx context.Context, queryString string, operationName string, variables map[string]interface{}) *Response { + if !s.res.Resolver.IsValid() { + panic("schema created without resolver, can not exec") + } + return s.exec(ctx, queryString, operationName, variables, s.res) +} + +func (s *Schema) exec(ctx context.Context, queryString string, operationName string, variables map[string]interface{}, res *resolvable.Schema) *Response { + doc, qErr := query.Parse(queryString) + if qErr != nil { + return &Response{Errors: []*errors.QueryError{qErr}} + } + + validationFinish := s.validationTracer.TraceValidation(ctx) + errs := validation.Validate(s.schema, doc, variables, s.maxDepth) + validationFinish(errs) + if len(errs) != 0 { + return &Response{Errors: errs} + } + + op, err := getOperation(doc, operationName) + if err != nil { + return &Response{Errors: []*errors.QueryError{errors.Errorf("%s", err)}} + } + + // If the optional "operationName" POST parameter is not provided then + // use the query's operation name for improved tracing. + if operationName == "" { + operationName = op.Name.Name + } + + // Subscriptions are not valid in Exec. Use schema.Subscribe() instead. 
+ if op.Type == query.Subscription { + return &Response{Errors: []*errors.QueryError{{Message: "graphql-ws protocol header is missing"}}} + } + if op.Type == query.Mutation { + if _, ok := s.schema.EntryPoints["mutation"]; !ok { + return &Response{Errors: []*errors.QueryError{{Message: "no mutations are offered by the schema"}}} + } + } + + // Fill in variables with the defaults from the operation + if variables == nil { + variables = make(map[string]interface{}, len(op.Vars)) + } + for _, v := range op.Vars { + if _, ok := variables[v.Name.Name]; !ok && v.Default != nil { + variables[v.Name.Name] = v.Default.Deserialize(nil) + } + } + + r := &exec.Request{ + Request: selected.Request{ + Doc: doc, + Vars: variables, + Schema: s.schema, + DisableIntrospection: s.disableIntrospection, + }, + Limiter: make(chan struct{}, s.maxParallelism), + Tracer: s.tracer, + Logger: s.logger, + PanicHandler: s.panicHandler, + } + varTypes := make(map[string]*introspection.Type) + for _, v := range op.Vars { + t, err := common.ResolveType(v.Type, s.schema.Resolve) + if err != nil { + return &Response{Errors: []*errors.QueryError{err}} + } + varTypes[v.Name.Name] = introspection.WrapType(t) + } + traceCtx, finish := s.tracer.TraceQuery(ctx, queryString, operationName, variables, varTypes) + data, errs := r.Execute(traceCtx, res, op) + finish(errs) + + return &Response{ + Data: data, + Errors: errs, + } +} + +func (s *Schema) validateSchema() error { + // https://graphql.github.io/graphql-spec/June2018/#sec-Root-Operation-Types + // > The query root operation type must be provided and must be an Object type. + if err := validateRootOp(s.schema, "query", true); err != nil { + return err + } + // > The mutation root operation type is optional; if it is not provided, the service does not support mutations. + // > If it is provided, it must be an Object type. + if err := validateRootOp(s.schema, "mutation", false); err != nil { + return err + } + // > Similarly, the subscription root operation type is also optional; if it is not provided, the service does not + // > support subscriptions. If it is provided, it must be an Object type. 
+ if err := validateRootOp(s.schema, "subscription", false); err != nil { + return err + } + return nil +} + +type validationBridgingTracer struct { + tracer trace.ValidationTracer //nolint:staticcheck +} + +func (t *validationBridgingTracer) TraceValidation(context.Context) trace.TraceValidationFinishFunc { + return t.tracer.TraceValidation() +} + +func validateRootOp(s *types.Schema, name string, mandatory bool) error { + t, ok := s.EntryPoints[name] + if !ok { + if mandatory { + return fmt.Errorf("root operation %q must be defined", name) + } + return nil + } + if t.Kind() != "OBJECT" { + return fmt.Errorf("root operation %q must be an OBJECT", name) + } + return nil +} + +func getOperation(document *types.ExecutableDefinition, operationName string) (*types.OperationDefinition, error) { + if len(document.Operations) == 0 { + return nil, fmt.Errorf("no operations in query document") + } + + if operationName == "" { + if len(document.Operations) > 1 { + return nil, fmt.Errorf("more than one operation in query document and no operation name given") + } + for _, op := range document.Operations { + return op, nil // return the one and only operation + } + } + + op := document.Operations.Get(operationName) + if op == nil { + return nil, fmt.Errorf("no operation with name %q", operationName) + } + return op, nil +} diff --git a/vendor/github.com/graph-gophers/graphql-go/id.go b/vendor/github.com/graph-gophers/graphql-go/id.go new file mode 100644 index 000000000..80bdac906 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/id.go @@ -0,0 +1,30 @@ +package graphql + +import ( + "fmt" + "strconv" +) + +// ID represents GraphQL's "ID" scalar type. A custom type may be used instead. +type ID string + +func (ID) ImplementsGraphQLType(name string) bool { + return name == "ID" +} + +func (id *ID) UnmarshalGraphQL(input interface{}) error { + var err error + switch input := input.(type) { + case string: + *id = ID(input) + case int32: + *id = ID(strconv.Itoa(int(input))) + default: + err = fmt.Errorf("wrong type for ID: %T", input) + } + return err +} + +func (id ID) MarshalJSON() ([]byte, error) { + return strconv.AppendQuote(nil, string(id)), nil +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/blockstring.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/blockstring.go new file mode 100644 index 000000000..1f7fe8133 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/common/blockstring.go @@ -0,0 +1,103 @@ +// MIT License +// +// Copyright (c) 2019 GraphQL Contributors +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. +// +// This implementation has been adapted from the graphql-js reference implementation +// https://github.com/graphql/graphql-js/blob/5eb7c4ded7ceb83ac742149cbe0dae07a8af9a30/src/language/blockString.js +// which is released under the MIT License above. + +package common + +import ( + "strings" +) + +// Produces the value of a block string from its parsed raw value, similar to +// CoffeeScript's block string, Python's docstring trim or Ruby's strip_heredoc. +// +// This implements the GraphQL spec's BlockStringValue() static algorithm. +func blockString(raw string) string { + lines := strings.Split(raw, "\n") + + // Remove common indentation from all lines except the first (which has none) + ind := blockStringIndentation(lines) + if ind > 0 { + for i := 1; i < len(lines); i++ { + l := lines[i] + if len(l) < ind { + lines[i] = "" + continue + } + lines[i] = l[ind:] + } + } + + // Remove leading and trailing blank lines + trimStart := 0 + for i := 0; i < len(lines) && isBlank(lines[i]); i++ { + trimStart++ + } + lines = lines[trimStart:] + trimEnd := 0 + for i := len(lines) - 1; i > 0 && isBlank(lines[i]); i-- { + trimEnd++ + } + lines = lines[:len(lines)-trimEnd] + + return strings.Join(lines, "\n") +} + +func blockStringIndentation(lines []string) int { + var commonIndent *int + for i := 1; i < len(lines); i++ { + l := lines[i] + indent := leadingWhitespace(l) + if indent == len(l) { + // don't consider blank/empty lines + continue + } + if indent == 0 { + return 0 + } + if commonIndent == nil || indent < *commonIndent { + commonIndent = &indent + } + } + if commonIndent == nil { + return 0 + } + return *commonIndent +} + +func isBlank(s string) bool { + return len(s) == 0 || leadingWhitespace(s) == len(s) +} + +func leadingWhitespace(s string) int { + i := 0 + for _, r := range s { + if r != '\t' && r != ' ' { + break + } + i++ + } + return i +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/directive.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/directive.go new file mode 100644 index 000000000..f767e28f0 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/common/directive.go @@ -0,0 +1,18 @@ +package common + +import "github.com/graph-gophers/graphql-go/types" + +func ParseDirectives(l *Lexer) types.DirectiveList { + var directives types.DirectiveList + for l.Peek() == '@' { + l.ConsumeToken('@') + d := &types.Directive{} + d.Name = l.ConsumeIdentWithLoc() + d.Name.Loc.Column-- + if l.Peek() == '(' { + d.Arguments = ParseArgumentList(l) + } + directives = append(directives, d) + } + return directives +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/lexer.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/lexer.go new file mode 100644 index 000000000..ff45bcad7 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/common/lexer.go @@ -0,0 +1,229 @@ +package common + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "text/scanner" + + "github.com/graph-gophers/graphql-go/errors" + "github.com/graph-gophers/graphql-go/types" +) + +type syntaxError string + +type Lexer struct { + sc *scanner.Scanner + next rune + comment bytes.Buffer + useStringDescriptions bool +} + +type Ident struct { + Name 
string + Loc errors.Location +} + +func NewLexer(s string, useStringDescriptions bool) *Lexer { + sc := &scanner.Scanner{ + Mode: scanner.ScanIdents | scanner.ScanInts | scanner.ScanFloats | scanner.ScanStrings, + } + sc.Init(strings.NewReader(s)) + + l := Lexer{sc: sc, useStringDescriptions: useStringDescriptions} + l.sc.Error = l.CatchScannerError + + return &l +} + +func (l *Lexer) CatchSyntaxError(f func()) (errRes *errors.QueryError) { + defer func() { + if err := recover(); err != nil { + if err, ok := err.(syntaxError); ok { + errRes = errors.Errorf("syntax error: %s", err) + errRes.Locations = []errors.Location{l.Location()} + return + } + panic(err) + } + }() + + f() + return +} + +func (l *Lexer) Peek() rune { + return l.next +} + +// ConsumeWhitespace consumes whitespace and tokens equivalent to whitespace (e.g. commas and comments). +// +// Consumed comment characters will build the description for the next type or field encountered. +// The description is available from `DescComment()`, and will be reset every time `ConsumeWhitespace()` is +// executed unless l.useStringDescriptions is set. +func (l *Lexer) ConsumeWhitespace() { + l.comment.Reset() + for { + l.next = l.sc.Scan() + + if l.next == ',' { + // Similar to white space and line terminators, commas (',') are used to improve the + // legibility of source text and separate lexical tokens but are otherwise syntactically and + // semantically insignificant within GraphQL documents. + // + // http://facebook.github.io/graphql/draft/#sec-Insignificant-Commas + continue + } + + if l.next == '#' { + // GraphQL source documents may contain single-line comments, starting with the '#' marker. + // + // A comment can contain any Unicode code point except `LineTerminator` so a comment always + // consists of all code points starting with the '#' character up to but not including the + // line terminator. + l.consumeComment() + continue + } + + break + } +} + +// consumeDescription optionally consumes a description based on the June 2018 graphql spec if any are present. +// +// Single quote strings are also single line. Triple quote strings can be multi-line. Triple quote strings +// whitespace trimmed on both ends. 
+// If a description is found, consume any following comments as well +// +// http://facebook.github.io/graphql/June2018/#sec-Descriptions +func (l *Lexer) consumeDescription() string { + // If the next token is not a string, we don't consume it + if l.next != scanner.String { + return "" + } + // Triple quote string is an empty "string" followed by an open quote due to the way the parser treats strings as one token + var desc string + if l.sc.Peek() == '"' { + desc = l.consumeTripleQuoteComment() + } else { + desc = l.consumeStringComment() + } + l.ConsumeWhitespace() + return desc +} + +func (l *Lexer) ConsumeIdent() string { + name := l.sc.TokenText() + l.ConsumeToken(scanner.Ident) + return name +} + +func (l *Lexer) ConsumeIdentWithLoc() types.Ident { + loc := l.Location() + name := l.sc.TokenText() + l.ConsumeToken(scanner.Ident) + return types.Ident{Name: name, Loc: loc} +} + +func (l *Lexer) ConsumeKeyword(keyword string) { + if l.next != scanner.Ident || l.sc.TokenText() != keyword { + l.SyntaxError(fmt.Sprintf("unexpected %q, expecting %q", l.sc.TokenText(), keyword)) + } + l.ConsumeWhitespace() +} + +func (l *Lexer) ConsumeLiteral() *types.PrimitiveValue { + lit := &types.PrimitiveValue{Type: l.next, Text: l.sc.TokenText()} + l.ConsumeWhitespace() + return lit +} + +func (l *Lexer) ConsumeToken(expected rune) { + if l.next != expected { + l.SyntaxError(fmt.Sprintf("unexpected %q, expecting %s", l.sc.TokenText(), scanner.TokenString(expected))) + } + l.ConsumeWhitespace() +} + +func (l *Lexer) DescComment() string { + comment := l.comment.String() + desc := l.consumeDescription() + if l.useStringDescriptions { + return desc + } + return comment +} + +func (l *Lexer) SyntaxError(message string) { + panic(syntaxError(message)) +} + +func (l *Lexer) Location() errors.Location { + return errors.Location{ + Line: l.sc.Line, + Column: l.sc.Column, + } +} + +func (l *Lexer) consumeTripleQuoteComment() string { + l.next = l.sc.Next() + if l.next != '"' { + panic("consumeTripleQuoteComment used in wrong context: no third quote?") + } + + var buf bytes.Buffer + var numQuotes int + for { + l.next = l.sc.Next() + if l.next == '"' { + numQuotes++ + } else { + numQuotes = 0 + } + buf.WriteRune(l.next) + if numQuotes == 3 || l.next == scanner.EOF { + break + } + } + val := buf.String() + val = val[:len(val)-numQuotes] + return blockString(val) +} + +func (l *Lexer) consumeStringComment() string { + val, err := strconv.Unquote(l.sc.TokenText()) + if err != nil { + panic(err) + } + return val +} + +// consumeComment consumes all characters from `#` to the first encountered line terminator. +// The characters are appended to `l.comment`. +func (l *Lexer) consumeComment() { + if l.next != '#' { + panic("consumeComment used in wrong context") + } + + // TODO: count and trim whitespace so we can dedent any following lines. 
+ if l.sc.Peek() == ' ' { + l.sc.Next() + } + + if l.comment.Len() > 0 { + l.comment.WriteRune('\n') + } + + for { + next := l.sc.Next() + if next == '\r' || next == '\n' || next == scanner.EOF { + break + } + l.comment.WriteRune(next) + } +} + +func (l *Lexer) CatchScannerError(s *scanner.Scanner, msg string) { + l.SyntaxError(msg) +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/literals.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/literals.go new file mode 100644 index 000000000..a6af3c43f --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/common/literals.go @@ -0,0 +1,58 @@ +package common + +import ( + "text/scanner" + + "github.com/graph-gophers/graphql-go/types" +) + +func ParseLiteral(l *Lexer, constOnly bool) types.Value { + loc := l.Location() + switch l.Peek() { + case '$': + if constOnly { + l.SyntaxError("variable not allowed") + panic("unreachable") + } + l.ConsumeToken('$') + return &types.Variable{Name: l.ConsumeIdent(), Loc: loc} + + case scanner.Int, scanner.Float, scanner.String, scanner.Ident: + lit := l.ConsumeLiteral() + if lit.Type == scanner.Ident && lit.Text == "null" { + return &types.NullValue{Loc: loc} + } + lit.Loc = loc + return lit + case '-': + l.ConsumeToken('-') + lit := l.ConsumeLiteral() + lit.Text = "-" + lit.Text + lit.Loc = loc + return lit + case '[': + l.ConsumeToken('[') + var list []types.Value + for l.Peek() != ']' { + list = append(list, ParseLiteral(l, constOnly)) + } + l.ConsumeToken(']') + return &types.ListValue{Values: list, Loc: loc} + + case '{': + l.ConsumeToken('{') + var fields []*types.ObjectField + for l.Peek() != '}' { + name := l.ConsumeIdentWithLoc() + l.ConsumeToken(':') + value := ParseLiteral(l, constOnly) + fields = append(fields, &types.ObjectField{Name: name, Value: value}) + } + l.ConsumeToken('}') + return &types.ObjectValue{Fields: fields, Loc: loc} + + default: + l.SyntaxError("invalid value") + panic("unreachable") + } +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/types.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/types.go new file mode 100644 index 000000000..4a30f46eb --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/common/types.go @@ -0,0 +1,67 @@ +package common + +import ( + "github.com/graph-gophers/graphql-go/errors" + "github.com/graph-gophers/graphql-go/types" +) + +func ParseType(l *Lexer) types.Type { + t := parseNullType(l) + if l.Peek() == '!' { + l.ConsumeToken('!') + return &types.NonNull{OfType: t} + } + return t +} + +func parseNullType(l *Lexer) types.Type { + if l.Peek() == '[' { + l.ConsumeToken('[') + ofType := ParseType(l) + l.ConsumeToken(']') + return &types.List{OfType: ofType} + } + + return &types.TypeName{Ident: l.ConsumeIdentWithLoc()} +} + +type Resolver func(name string) types.Type + +// ResolveType attempts to resolve a type's name against a resolving function. +// This function is used when one needs to check if a TypeName exists in the resolver (typically a Schema). +// +// In the example below, ResolveType would be used to check if the resolving function +// returns a valid type for Dimension: +// +// type Profile { +// picture(dimensions: Dimension): Url +// } +// +// ResolveType recursively unwraps List and NonNull types until a NamedType is reached. 
+func ResolveType(t types.Type, resolver Resolver) (types.Type, *errors.QueryError) { + switch t := t.(type) { + case *types.List: + ofType, err := ResolveType(t.OfType, resolver) + if err != nil { + return nil, err + } + return &types.List{OfType: ofType}, nil + case *types.NonNull: + ofType, err := ResolveType(t.OfType, resolver) + if err != nil { + return nil, err + } + return &types.NonNull{OfType: ofType}, nil + case *types.TypeName: + refT := resolver(t.Name) + if refT == nil { + err := errors.Errorf("Unknown type %q.", t.Name) + err.Rule = "KnownTypeNames" + err.Locations = []errors.Location{t.Loc} + return nil, err + } + return refT, nil + default: + return t, nil + } +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/common/values.go b/vendor/github.com/graph-gophers/graphql-go/internal/common/values.go new file mode 100644 index 000000000..2d6e0b54e --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/common/values.go @@ -0,0 +1,37 @@ +package common + +import ( + "github.com/graph-gophers/graphql-go/types" +) + +func ParseInputValue(l *Lexer) *types.InputValueDefinition { + p := &types.InputValueDefinition{} + p.Loc = l.Location() + p.Desc = l.DescComment() + p.Name = l.ConsumeIdentWithLoc() + l.ConsumeToken(':') + p.TypeLoc = l.Location() + p.Type = ParseType(l) + if l.Peek() == '=' { + l.ConsumeToken('=') + p.Default = ParseLiteral(l, true) + } + p.Directives = ParseDirectives(l) + return p +} + +func ParseArgumentList(l *Lexer) types.ArgumentList { + var args types.ArgumentList + l.ConsumeToken('(') + for l.Peek() != ')' { + name := l.ConsumeIdentWithLoc() + l.ConsumeToken(':') + value := ParseLiteral(l, false) + args = append(args, &types.Argument{ + Name: name, + Value: value, + }) + } + l.ConsumeToken(')') + return args +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/exec.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/exec.go new file mode 100644 index 000000000..6b478487b --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/exec/exec.go @@ -0,0 +1,381 @@ +package exec + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "reflect" + "sync" + "time" + + "github.com/graph-gophers/graphql-go/errors" + "github.com/graph-gophers/graphql-go/internal/exec/resolvable" + "github.com/graph-gophers/graphql-go/internal/exec/selected" + "github.com/graph-gophers/graphql-go/internal/query" + "github.com/graph-gophers/graphql-go/log" + "github.com/graph-gophers/graphql-go/trace" + "github.com/graph-gophers/graphql-go/types" +) + +type Request struct { + selected.Request + Limiter chan struct{} + Tracer trace.Tracer + Logger log.Logger + PanicHandler errors.PanicHandler + SubscribeResolverTimeout time.Duration +} + +func (r *Request) handlePanic(ctx context.Context) { + if value := recover(); value != nil { + r.Logger.LogPanic(ctx, value) + r.AddError(r.PanicHandler.MakePanicError(ctx, value)) + } +} + +type extensionser interface { + Extensions() map[string]interface{} +} + +func (r *Request) Execute(ctx context.Context, s *resolvable.Schema, op *types.OperationDefinition) ([]byte, []*errors.QueryError) { + var out bytes.Buffer + func() { + defer r.handlePanic(ctx) + sels := selected.ApplyOperation(&r.Request, s, op) + r.execSelections(ctx, sels, nil, s, s.Resolver, &out, op.Type == query.Mutation) + }() + + if err := ctx.Err(); err != nil { + return nil, []*errors.QueryError{errors.Errorf("%s", err)} + } + + return out.Bytes(), r.Errs +} + +type fieldToExec struct { + field 
*selected.SchemaField + sels []selected.Selection + resolver reflect.Value + out *bytes.Buffer +} + +func resolvedToNull(b *bytes.Buffer) bool { + return bytes.Equal(b.Bytes(), []byte("null")) +} + +func (r *Request) execSelections(ctx context.Context, sels []selected.Selection, path *pathSegment, s *resolvable.Schema, resolver reflect.Value, out *bytes.Buffer, serially bool) { + async := !serially && selected.HasAsyncSel(sels) + + var fields []*fieldToExec + collectFieldsToResolve(sels, s, resolver, &fields, make(map[string]*fieldToExec)) + + if async { + var wg sync.WaitGroup + wg.Add(len(fields)) + for _, f := range fields { + go func(f *fieldToExec) { + defer wg.Done() + defer r.handlePanic(ctx) + f.out = new(bytes.Buffer) + execFieldSelection(ctx, r, s, f, &pathSegment{path, f.field.Alias}, true) + }(f) + } + wg.Wait() + } else { + for _, f := range fields { + f.out = new(bytes.Buffer) + execFieldSelection(ctx, r, s, f, &pathSegment{path, f.field.Alias}, true) + } + } + + out.WriteByte('{') + for i, f := range fields { + // If a non-nullable child resolved to null, an error was added to the + // "errors" list in the response, so this field resolves to null. + // If this field is non-nullable, the error is propagated to its parent. + if _, ok := f.field.Type.(*types.NonNull); ok && resolvedToNull(f.out) { + out.Reset() + out.Write([]byte("null")) + return + } + + if i > 0 { + out.WriteByte(',') + } + out.WriteByte('"') + out.WriteString(f.field.Alias) + out.WriteByte('"') + out.WriteByte(':') + out.Write(f.out.Bytes()) + } + out.WriteByte('}') +} + +func collectFieldsToResolve(sels []selected.Selection, s *resolvable.Schema, resolver reflect.Value, fields *[]*fieldToExec, fieldByAlias map[string]*fieldToExec) { + for _, sel := range sels { + switch sel := sel.(type) { + case *selected.SchemaField: + field, ok := fieldByAlias[sel.Alias] + if !ok { // validation already checked for conflict (TODO) + field = &fieldToExec{field: sel, resolver: resolver} + fieldByAlias[sel.Alias] = field + *fields = append(*fields, field) + } + field.sels = append(field.sels, sel.Sels...) 
+ + case *selected.TypenameField: + _, ok := fieldByAlias[sel.Alias] + if !ok { + res := reflect.ValueOf(typeOf(sel, resolver)) + f := s.FieldTypename + f.TypeName = res.String() + + sf := &selected.SchemaField{ + Field: f, + Alias: sel.Alias, + FixedResult: res, + } + + field := &fieldToExec{field: sf, resolver: resolver} + *fields = append(*fields, field) + fieldByAlias[sel.Alias] = field + } + + case *selected.TypeAssertion: + out := resolver.Method(sel.MethodIndex).Call(nil) + if !out[1].Bool() { + continue + } + collectFieldsToResolve(sel.Sels, s, out[0], fields, fieldByAlias) + + default: + panic("unreachable") + } + } +} + +func typeOf(tf *selected.TypenameField, resolver reflect.Value) string { + if len(tf.TypeAssertions) == 0 { + return tf.Name + } + for name, a := range tf.TypeAssertions { + out := resolver.Method(a.MethodIndex).Call(nil) + if out[1].Bool() { + return name + } + } + return "" +} + +func execFieldSelection(ctx context.Context, r *Request, s *resolvable.Schema, f *fieldToExec, path *pathSegment, applyLimiter bool) { + if applyLimiter { + r.Limiter <- struct{}{} + } + + var result reflect.Value + var err *errors.QueryError + + traceCtx, finish := r.Tracer.TraceField(ctx, f.field.TraceLabel, f.field.TypeName, f.field.Name, !f.field.Async, f.field.Args) + defer func() { + finish(err) + }() + + err = func() (err *errors.QueryError) { + defer func() { + if panicValue := recover(); panicValue != nil { + r.Logger.LogPanic(ctx, panicValue) + err = r.PanicHandler.MakePanicError(ctx, panicValue) + err.Path = path.toSlice() + } + }() + + if f.field.FixedResult.IsValid() { + result = f.field.FixedResult + return nil + } + + if err := traceCtx.Err(); err != nil { + return errors.Errorf("%s", err) // don't execute any more resolvers if context got cancelled + } + + res := f.resolver + if f.field.UseMethodResolver() { + var in []reflect.Value + if f.field.HasContext { + in = append(in, reflect.ValueOf(traceCtx)) + } + if f.field.ArgsPacker != nil { + in = append(in, f.field.PackedArgs) + } + callOut := res.Method(f.field.MethodIndex).Call(in) + result = callOut[0] + if f.field.HasError && !callOut[1].IsNil() { + resolverErr := callOut[1].Interface().(error) + err := errors.Errorf("%s", resolverErr) + err.Path = path.toSlice() + err.ResolverError = resolverErr + if ex, ok := callOut[1].Interface().(extensionser); ok { + err.Extensions = ex.Extensions() + } + return err + } + } else { + // TODO extract out unwrapping ptr logic to a common place + if res.Kind() == reflect.Ptr { + res = res.Elem() + } + result = res.FieldByIndex(f.field.FieldIndex) + } + return nil + }() + + if applyLimiter { + <-r.Limiter + } + + if err != nil { + // If an error occurred while resolving a field, it should be treated as though the field + // returned null, and an error must be added to the "errors" list in the response. 
+ r.AddError(err) + f.out.WriteString("null") + return + } + + r.execSelectionSet(traceCtx, f.sels, f.field.Type, path, s, result, f.out) +} + +func (r *Request) execSelectionSet(ctx context.Context, sels []selected.Selection, typ types.Type, path *pathSegment, s *resolvable.Schema, resolver reflect.Value, out *bytes.Buffer) { + t, nonNull := unwrapNonNull(typ) + + // a reflect.Value of a nil interface will show up as an Invalid value + if resolver.Kind() == reflect.Invalid || ((resolver.Kind() == reflect.Ptr || resolver.Kind() == reflect.Interface) && resolver.IsNil()) { + // If a field of a non-null type resolves to null (either because the + // function to resolve the field returned null or because an error occurred), + // add an error to the "errors" list in the response. + if nonNull { + err := errors.Errorf("graphql: got nil for non-null %q", t) + err.Path = path.toSlice() + r.AddError(err) + } + out.WriteString("null") + return + } + + switch t.(type) { + case *types.ObjectTypeDefinition, *types.InterfaceTypeDefinition, *types.Union: + r.execSelections(ctx, sels, path, s, resolver, out, false) + return + } + + // Any pointers or interfaces at this point should be non-nil, so we can get the actual value of them + // for serialization + if resolver.Kind() == reflect.Ptr || resolver.Kind() == reflect.Interface { + resolver = resolver.Elem() + } + + switch t := t.(type) { + case *types.List: + r.execList(ctx, sels, t, path, s, resolver, out) + + case *types.ScalarTypeDefinition: + v := resolver.Interface() + data, err := json.Marshal(v) + if err != nil { + panic(errors.Errorf("could not marshal %v: %s", v, err)) + } + out.Write(data) + + case *types.EnumTypeDefinition: + var stringer fmt.Stringer = resolver + if s, ok := resolver.Interface().(fmt.Stringer); ok { + stringer = s + } + name := stringer.String() + var valid bool + for _, v := range t.EnumValuesDefinition { + if v.EnumValue == name { + valid = true + break + } + } + if !valid { + err := errors.Errorf("Invalid value %s.\nExpected type %s, found %s.", name, t.Name, name) + err.Path = path.toSlice() + r.AddError(err) + out.WriteString("null") + return + } + out.WriteByte('"') + out.WriteString(name) + out.WriteByte('"') + + default: + panic("unreachable") + } +} + +func (r *Request) execList(ctx context.Context, sels []selected.Selection, typ *types.List, path *pathSegment, s *resolvable.Schema, resolver reflect.Value, out *bytes.Buffer) { + l := resolver.Len() + entryouts := make([]bytes.Buffer, l) + + if selected.HasAsyncSel(sels) { + // Limit the number of concurrent goroutines spawned as it can lead to large + // memory spikes for large lists. + concurrency := cap(r.Limiter) + sem := make(chan struct{}, concurrency) + for i := 0; i < l; i++ { + sem <- struct{}{} + go func(i int) { + defer func() { <-sem }() + defer r.handlePanic(ctx) + r.execSelectionSet(ctx, sels, typ.OfType, &pathSegment{path, i}, s, resolver.Index(i), &entryouts[i]) + }(i) + } + for i := 0; i < concurrency; i++ { + sem <- struct{}{} + } + } else { + for i := 0; i < l; i++ { + r.execSelectionSet(ctx, sels, typ.OfType, &pathSegment{path, i}, s, resolver.Index(i), &entryouts[i]) + } + } + + _, listOfNonNull := typ.OfType.(*types.NonNull) + + out.WriteByte('[') + for i, entryout := range entryouts { + // If the list wraps a non-null type and one of the list elements + // resolves to null, then the entire list resolves to null. 
+ if listOfNonNull && resolvedToNull(&entryout) { + out.Reset() + out.WriteString("null") + return + } + + if i > 0 { + out.WriteByte(',') + } + out.Write(entryout.Bytes()) + } + out.WriteByte(']') +} + +func unwrapNonNull(t types.Type) (types.Type, bool) { + if nn, ok := t.(*types.NonNull); ok { + return nn.OfType, true + } + return t, false +} + +type pathSegment struct { + parent *pathSegment + value interface{} +} + +func (p *pathSegment) toSlice() []interface{} { + if p == nil { + return nil + } + return append(p.parent.toSlice(), p.value) +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/packer/packer.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/packer/packer.go new file mode 100644 index 000000000..c0bb7dc91 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/exec/packer/packer.go @@ -0,0 +1,390 @@ +package packer + +import ( + "fmt" + "math" + "reflect" + "strings" + + "github.com/graph-gophers/graphql-go/decode" + "github.com/graph-gophers/graphql-go/errors" + "github.com/graph-gophers/graphql-go/types" +) + +type packer interface { + Pack(value interface{}) (reflect.Value, error) +} + +type Builder struct { + packerMap map[typePair]*packerMapEntry + structPackers []*StructPacker +} + +type typePair struct { + graphQLType types.Type + resolverType reflect.Type +} + +type packerMapEntry struct { + packer packer + targets []*packer +} + +func NewBuilder() *Builder { + return &Builder{ + packerMap: make(map[typePair]*packerMapEntry), + } +} + +func (b *Builder) Finish() error { + for _, entry := range b.packerMap { + for _, target := range entry.targets { + *target = entry.packer + } + } + + for _, p := range b.structPackers { + p.defaultStruct = reflect.New(p.structType).Elem() + for _, f := range p.fields { + if defaultVal := f.field.Default; defaultVal != nil { + v, err := f.fieldPacker.Pack(defaultVal.Deserialize(nil)) + if err != nil { + return err + } + p.defaultStruct.FieldByIndex(f.fieldIndex).Set(v) + } + } + } + + return nil +} + +func (b *Builder) assignPacker(target *packer, schemaType types.Type, reflectType reflect.Type) error { + k := typePair{schemaType, reflectType} + ref, ok := b.packerMap[k] + if !ok { + ref = &packerMapEntry{} + b.packerMap[k] = ref + var err error + ref.packer, err = b.makePacker(schemaType, reflectType) + if err != nil { + return err + } + } + ref.targets = append(ref.targets, target) + return nil +} + +func (b *Builder) makePacker(schemaType types.Type, reflectType reflect.Type) (packer, error) { + t, nonNull := unwrapNonNull(schemaType) + if !nonNull { + if reflectType.Kind() == reflect.Ptr { + elemType := reflectType.Elem() + addPtr := true + if _, ok := t.(*types.InputObject); ok { + elemType = reflectType // keep pointer for input objects + addPtr = false + } + elem, err := b.makeNonNullPacker(t, elemType) + if err != nil { + return nil, err + } + return &nullPacker{ + elemPacker: elem, + valueType: reflectType, + addPtr: addPtr, + }, nil + } else if isNullable(reflectType) { + elemType := reflectType + addPtr := false + elem, err := b.makeNonNullPacker(t, elemType) + if err != nil { + return nil, err + } + return &nullPacker{ + elemPacker: elem, + valueType: reflectType, + addPtr: addPtr, + }, nil + } else { + return nil, fmt.Errorf("%s is not a pointer or a nullable type", reflectType) + } + } + + return b.makeNonNullPacker(t, reflectType) +} + +func (b *Builder) makeNonNullPacker(schemaType types.Type, reflectType reflect.Type) (packer, error) { + if u, ok := 
reflect.New(reflectType).Interface().(decode.Unmarshaler); ok { + if !u.ImplementsGraphQLType(schemaType.String()) { + return nil, fmt.Errorf("can not unmarshal %s into %s", schemaType, reflectType) + } + return &unmarshalerPacker{ + ValueType: reflectType, + }, nil + } + + switch t := schemaType.(type) { + case *types.ScalarTypeDefinition: + return &ValuePacker{ + ValueType: reflectType, + }, nil + + case *types.EnumTypeDefinition: + if reflectType.Kind() != reflect.String { + return nil, fmt.Errorf("wrong type, expected %s", reflect.String) + } + return &ValuePacker{ + ValueType: reflectType, + }, nil + + case *types.InputObject: + e, err := b.MakeStructPacker(t.Values, reflectType) + if err != nil { + return nil, err + } + return e, nil + + case *types.List: + if reflectType.Kind() != reflect.Slice { + return nil, fmt.Errorf("expected slice, got %s", reflectType) + } + p := &listPacker{ + sliceType: reflectType, + } + if err := b.assignPacker(&p.elem, t.OfType, reflectType.Elem()); err != nil { + return nil, err + } + return p, nil + + case *types.ObjectTypeDefinition, *types.InterfaceTypeDefinition, *types.Union: + return nil, fmt.Errorf("type of kind %s can not be used as input", t.Kind()) + + default: + panic("unreachable") + } +} + +func (b *Builder) MakeStructPacker(values []*types.InputValueDefinition, typ reflect.Type) (*StructPacker, error) { + structType := typ + usePtr := false + if typ.Kind() == reflect.Ptr { + structType = typ.Elem() + usePtr = true + } + if structType.Kind() != reflect.Struct { + return nil, fmt.Errorf("expected struct or pointer to struct, got %s (hint: missing `args struct { ... }` wrapper for field arguments?)", typ) + } + + var fields []*structPackerField + for _, v := range values { + fe := &structPackerField{field: v} + fx := func(n string) bool { + return strings.EqualFold(stripUnderscore(n), stripUnderscore(v.Name.Name)) + } + + sf, ok := structType.FieldByNameFunc(fx) + if !ok { + return nil, fmt.Errorf("%s does not define field %q (hint: missing `args struct { ... 
}` wrapper for field arguments, or missing field on input struct)", typ, v.Name.Name) + } + if sf.PkgPath != "" { + return nil, fmt.Errorf("field %q must be exported", sf.Name) + } + fe.fieldIndex = sf.Index + + ft := v.Type + if v.Default != nil { + ft, _ = unwrapNonNull(ft) + ft = &types.NonNull{OfType: ft} + } + + if err := b.assignPacker(&fe.fieldPacker, ft, sf.Type); err != nil { + return nil, fmt.Errorf("field %q: %s", sf.Name, err) + } + + fields = append(fields, fe) + } + + p := &StructPacker{ + structType: structType, + usePtr: usePtr, + fields: fields, + } + b.structPackers = append(b.structPackers, p) + return p, nil +} + +type StructPacker struct { + structType reflect.Type + usePtr bool + defaultStruct reflect.Value + fields []*structPackerField +} + +type structPackerField struct { + field *types.InputValueDefinition + fieldIndex []int + fieldPacker packer +} + +func (p *StructPacker) Pack(value interface{}) (reflect.Value, error) { + if value == nil { + return reflect.Value{}, errors.Errorf("got null for non-null") + } + + values := value.(map[string]interface{}) + v := reflect.New(p.structType) + v.Elem().Set(p.defaultStruct) + for _, f := range p.fields { + if value, ok := values[f.field.Name.Name]; ok { + packed, err := f.fieldPacker.Pack(value) + if err != nil { + return reflect.Value{}, err + } + v.Elem().FieldByIndex(f.fieldIndex).Set(packed) + } + } + if !p.usePtr { + return v.Elem(), nil + } + return v, nil +} + +type listPacker struct { + sliceType reflect.Type + elem packer +} + +func (e *listPacker) Pack(value interface{}) (reflect.Value, error) { + list, ok := value.([]interface{}) + if !ok { + list = []interface{}{value} + } + + v := reflect.MakeSlice(e.sliceType, len(list), len(list)) + for i := range list { + packed, err := e.elem.Pack(list[i]) + if err != nil { + return reflect.Value{}, err + } + v.Index(i).Set(packed) + } + return v, nil +} + +type nullPacker struct { + elemPacker packer + valueType reflect.Type + addPtr bool +} + +func (p *nullPacker) Pack(value interface{}) (reflect.Value, error) { + if value == nil && !isNullable(p.valueType) { + return reflect.Zero(p.valueType), nil + } + + v, err := p.elemPacker.Pack(value) + if err != nil { + return reflect.Value{}, err + } + + if p.addPtr { + ptr := reflect.New(p.valueType.Elem()) + ptr.Elem().Set(v) + return ptr, nil + } + + return v, nil +} + +type ValuePacker struct { + ValueType reflect.Type +} + +func (p *ValuePacker) Pack(value interface{}) (reflect.Value, error) { + if value == nil { + return reflect.Value{}, errors.Errorf("got null for non-null") + } + + coerced, err := unmarshalInput(p.ValueType, value) + if err != nil { + return reflect.Value{}, fmt.Errorf("could not unmarshal %#v (%T) into %s: %s", value, value, p.ValueType, err) + } + return reflect.ValueOf(coerced), nil +} + +type unmarshalerPacker struct { + ValueType reflect.Type +} + +func (p *unmarshalerPacker) Pack(value interface{}) (reflect.Value, error) { + if value == nil && !isNullable(p.ValueType) { + return reflect.Value{}, errors.Errorf("got null for non-null") + } + + v := reflect.New(p.ValueType) + if err := v.Interface().(decode.Unmarshaler).UnmarshalGraphQL(value); err != nil { + return reflect.Value{}, err + } + return v.Elem(), nil +} + +func unmarshalInput(typ reflect.Type, input interface{}) (interface{}, error) { + if reflect.TypeOf(input) == typ { + return input, nil + } + + switch typ.Kind() { + case reflect.Int32: + switch input := input.(type) { + case int: + if input < math.MinInt32 || input > math.MaxInt32 { + 
return nil, fmt.Errorf("not a 32-bit integer") + } + return int32(input), nil + case float64: + coerced := int32(input) + if input < math.MinInt32 || input > math.MaxInt32 || float64(coerced) != input { + return nil, fmt.Errorf("not a 32-bit integer") + } + return coerced, nil + } + + case reflect.Float64: + switch input := input.(type) { + case int32: + return float64(input), nil + case int: + return float64(input), nil + } + + case reflect.String: + if reflect.TypeOf(input).ConvertibleTo(typ) { + return reflect.ValueOf(input).Convert(typ).Interface(), nil + } + } + + return nil, fmt.Errorf("incompatible type") +} + +func unwrapNonNull(t types.Type) (types.Type, bool) { + if nn, ok := t.(*types.NonNull); ok { + return nn.OfType, true + } + return t, false +} + +func stripUnderscore(s string) string { + return strings.Replace(s, "_", "", -1) +} + +// NullUnmarshaller is an unmarshaller that can handle a nil input +type NullUnmarshaller interface { + decode.Unmarshaler + Nullable() +} + +func isNullable(t reflect.Type) bool { + _, ok := reflect.New(t).Interface().(NullUnmarshaller) + return ok +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/meta.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/meta.go new file mode 100644 index 000000000..02d5e262f --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/meta.go @@ -0,0 +1,70 @@ +package resolvable + +import ( + "reflect" + + "github.com/graph-gophers/graphql-go/introspection" + "github.com/graph-gophers/graphql-go/types" +) + +// Meta defines the details of the metadata schema for introspection. +type Meta struct { + FieldSchema Field + FieldType Field + FieldTypename Field + Schema *Object + Type *Object +} + +func newMeta(s *types.Schema) *Meta { + var err error + b := newBuilder(s) + + metaSchema := s.Types["__Schema"].(*types.ObjectTypeDefinition) + so, err := b.makeObjectExec(metaSchema.Name, metaSchema.Fields, nil, false, reflect.TypeOf(&introspection.Schema{})) + if err != nil { + panic(err) + } + + metaType := s.Types["__Type"].(*types.ObjectTypeDefinition) + t, err := b.makeObjectExec(metaType.Name, metaType.Fields, nil, false, reflect.TypeOf(&introspection.Type{})) + if err != nil { + panic(err) + } + + if err := b.finish(); err != nil { + panic(err) + } + + fieldTypename := Field{ + FieldDefinition: types.FieldDefinition{ + Name: "__typename", + Type: &types.NonNull{OfType: s.Types["String"]}, + }, + TraceLabel: "GraphQL field: __typename", + } + + fieldSchema := Field{ + FieldDefinition: types.FieldDefinition{ + Name: "__schema", + Type: s.Types["__Schema"], + }, + TraceLabel: "GraphQL field: __schema", + } + + fieldType := Field{ + FieldDefinition: types.FieldDefinition{ + Name: "__type", + Type: s.Types["__Type"], + }, + TraceLabel: "GraphQL field: __type", + } + + return &Meta{ + FieldSchema: fieldSchema, + FieldTypename: fieldTypename, + FieldType: fieldType, + Schema: so, + Type: t, + } +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/resolvable.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/resolvable.go new file mode 100644 index 000000000..3410f5578 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/exec/resolvable/resolvable.go @@ -0,0 +1,453 @@ +package resolvable + +import ( + "context" + "fmt" + "reflect" + "strings" + + "github.com/graph-gophers/graphql-go/decode" + "github.com/graph-gophers/graphql-go/internal/exec/packer" + 
"github.com/graph-gophers/graphql-go/types" +) + +type Schema struct { + *Meta + types.Schema + Query Resolvable + Mutation Resolvable + Subscription Resolvable + Resolver reflect.Value +} + +type Resolvable interface { + isResolvable() +} + +type Object struct { + Name string + Fields map[string]*Field + TypeAssertions map[string]*TypeAssertion +} + +type Field struct { + types.FieldDefinition + TypeName string + MethodIndex int + FieldIndex []int + HasContext bool + HasError bool + ArgsPacker *packer.StructPacker + ValueExec Resolvable + TraceLabel string +} + +func (f *Field) UseMethodResolver() bool { + return len(f.FieldIndex) == 0 +} + +type TypeAssertion struct { + MethodIndex int + TypeExec Resolvable +} + +type List struct { + Elem Resolvable +} + +type Scalar struct{} + +func (*Object) isResolvable() {} +func (*List) isResolvable() {} +func (*Scalar) isResolvable() {} + +func ApplyResolver(s *types.Schema, resolver interface{}) (*Schema, error) { + if resolver == nil { + return &Schema{Meta: newMeta(s), Schema: *s}, nil + } + + b := newBuilder(s) + + var query, mutation, subscription Resolvable + + if t, ok := s.EntryPoints["query"]; ok { + if err := b.assignExec(&query, t, reflect.TypeOf(resolver)); err != nil { + return nil, err + } + } + + if t, ok := s.EntryPoints["mutation"]; ok { + if err := b.assignExec(&mutation, t, reflect.TypeOf(resolver)); err != nil { + return nil, err + } + } + + if t, ok := s.EntryPoints["subscription"]; ok { + if err := b.assignExec(&subscription, t, reflect.TypeOf(resolver)); err != nil { + return nil, err + } + } + + if err := b.finish(); err != nil { + return nil, err + } + + return &Schema{ + Meta: newMeta(s), + Schema: *s, + Resolver: reflect.ValueOf(resolver), + Query: query, + Mutation: mutation, + Subscription: subscription, + }, nil +} + +type execBuilder struct { + schema *types.Schema + resMap map[typePair]*resMapEntry + packerBuilder *packer.Builder +} + +type typePair struct { + graphQLType types.Type + resolverType reflect.Type +} + +type resMapEntry struct { + exec Resolvable + targets []*Resolvable +} + +func newBuilder(s *types.Schema) *execBuilder { + return &execBuilder{ + schema: s, + resMap: make(map[typePair]*resMapEntry), + packerBuilder: packer.NewBuilder(), + } +} + +func (b *execBuilder) finish() error { + for _, entry := range b.resMap { + for _, target := range entry.targets { + *target = entry.exec + } + } + + return b.packerBuilder.Finish() +} + +func (b *execBuilder) assignExec(target *Resolvable, t types.Type, resolverType reflect.Type) error { + k := typePair{t, resolverType} + ref, ok := b.resMap[k] + if !ok { + ref = &resMapEntry{} + b.resMap[k] = ref + var err error + ref.exec, err = b.makeExec(t, resolverType) + if err != nil { + return err + } + } + ref.targets = append(ref.targets, target) + return nil +} + +func (b *execBuilder) makeExec(t types.Type, resolverType reflect.Type) (Resolvable, error) { + var nonNull bool + t, nonNull = unwrapNonNull(t) + + switch t := t.(type) { + case *types.ObjectTypeDefinition: + return b.makeObjectExec(t.Name, t.Fields, nil, nonNull, resolverType) + + case *types.InterfaceTypeDefinition: + return b.makeObjectExec(t.Name, t.Fields, t.PossibleTypes, nonNull, resolverType) + + case *types.Union: + return b.makeObjectExec(t.Name, nil, t.UnionMemberTypes, nonNull, resolverType) + } + + if !nonNull { + if resolverType.Kind() != reflect.Ptr { + return nil, fmt.Errorf("%s is not a pointer", resolverType) + } + resolverType = resolverType.Elem() + } + + switch t := t.(type) { + case 
*types.ScalarTypeDefinition: + return makeScalarExec(t, resolverType) + + case *types.EnumTypeDefinition: + return &Scalar{}, nil + + case *types.List: + if resolverType.Kind() != reflect.Slice { + return nil, fmt.Errorf("%s is not a slice", resolverType) + } + e := &List{} + if err := b.assignExec(&e.Elem, t.OfType, resolverType.Elem()); err != nil { + return nil, err + } + return e, nil + + default: + panic("invalid type: " + t.String()) + } +} + +func makeScalarExec(t *types.ScalarTypeDefinition, resolverType reflect.Type) (Resolvable, error) { + implementsType := false + switch r := reflect.New(resolverType).Interface().(type) { + case *int32: + implementsType = t.Name == "Int" + case *float64: + implementsType = t.Name == "Float" + case *string: + implementsType = t.Name == "String" + case *bool: + implementsType = t.Name == "Boolean" + case decode.Unmarshaler: + implementsType = r.ImplementsGraphQLType(t.Name) + } + + if !implementsType { + return nil, fmt.Errorf("can not use %s as %s", resolverType, t.Name) + } + return &Scalar{}, nil +} + +func (b *execBuilder) makeObjectExec(typeName string, fields types.FieldsDefinition, possibleTypes []*types.ObjectTypeDefinition, + nonNull bool, resolverType reflect.Type) (*Object, error) { + if !nonNull { + if resolverType.Kind() != reflect.Ptr && resolverType.Kind() != reflect.Interface { + return nil, fmt.Errorf("%s is not a pointer or interface", resolverType) + } + } + + methodHasReceiver := resolverType.Kind() != reflect.Interface + + Fields := make(map[string]*Field) + rt := unwrapPtr(resolverType) + fieldsCount := fieldCount(rt, map[string]int{}) + for _, f := range fields { + var fieldIndex []int + methodIndex := findMethod(resolverType, f.Name) + if b.schema.UseFieldResolvers && methodIndex == -1 { + if fieldsCount[strings.ToLower(stripUnderscore(f.Name))] > 1 { + return nil, fmt.Errorf("%s does not resolve %q: ambiguous field %q", resolverType, typeName, f.Name) + } + fieldIndex = findField(rt, f.Name, []int{}) + } + if methodIndex == -1 && len(fieldIndex) == 0 { + hint := "" + if findMethod(reflect.PtrTo(resolverType), f.Name) != -1 { + hint = " (hint: the method exists on the pointer type)" + } + return nil, fmt.Errorf("%s does not resolve %q: missing method for field %q%s", resolverType, typeName, f.Name, hint) + } + + var m reflect.Method + var sf reflect.StructField + if methodIndex != -1 { + m = resolverType.Method(methodIndex) + } else { + sf = rt.FieldByIndex(fieldIndex) + } + fe, err := b.makeFieldExec(typeName, f, m, sf, methodIndex, fieldIndex, methodHasReceiver) + if err != nil { + var resolverName string + if methodIndex != -1 { + resolverName = m.Name + } else { + resolverName = sf.Name + } + return nil, fmt.Errorf("%s\n\tused by (%s).%s", err, resolverType, resolverName) + } + Fields[f.Name] = fe + } + + // Check type assertions when + // 1) using method resolvers + // 2) Or resolver is not an interface type + typeAssertions := make(map[string]*TypeAssertion) + if !b.schema.UseFieldResolvers || resolverType.Kind() != reflect.Interface { + for _, impl := range possibleTypes { + methodIndex := findMethod(resolverType, "To"+impl.Name) + if methodIndex == -1 { + return nil, fmt.Errorf("%s does not resolve %q: missing method %q to convert to %q", resolverType, typeName, "To"+impl.Name, impl.Name) + } + if resolverType.Method(methodIndex).Type.NumOut() != 2 { + return nil, fmt.Errorf("%s does not resolve %q: method %q should return a value and a bool indicating success", resolverType, typeName, "To"+impl.Name) + } + a := 
&TypeAssertion{ + MethodIndex: methodIndex, + } + if err := b.assignExec(&a.TypeExec, impl, resolverType.Method(methodIndex).Type.Out(0)); err != nil { + return nil, err + } + typeAssertions[impl.Name] = a + } + } + + return &Object{ + Name: typeName, + Fields: Fields, + TypeAssertions: typeAssertions, + }, nil +} + +var contextType = reflect.TypeOf((*context.Context)(nil)).Elem() +var errorType = reflect.TypeOf((*error)(nil)).Elem() + +func (b *execBuilder) makeFieldExec(typeName string, f *types.FieldDefinition, m reflect.Method, sf reflect.StructField, + methodIndex int, fieldIndex []int, methodHasReceiver bool) (*Field, error) { + + var argsPacker *packer.StructPacker + var hasError bool + var hasContext bool + + // Validate resolver method only when there is one + if methodIndex != -1 { + in := make([]reflect.Type, m.Type.NumIn()) + for i := range in { + in[i] = m.Type.In(i) + } + if methodHasReceiver { + in = in[1:] // first parameter is receiver + } + + hasContext = len(in) > 0 && in[0] == contextType + if hasContext { + in = in[1:] + } + + if len(f.Arguments) > 0 { + if len(in) == 0 { + return nil, fmt.Errorf("must have parameter for field arguments") + } + var err error + argsPacker, err = b.packerBuilder.MakeStructPacker(f.Arguments, in[0]) + if err != nil { + return nil, err + } + in = in[1:] + } + + if len(in) > 0 { + return nil, fmt.Errorf("too many parameters") + } + + maxNumOfReturns := 2 + if m.Type.NumOut() < maxNumOfReturns-1 { + return nil, fmt.Errorf("too few return values") + } + + if m.Type.NumOut() > maxNumOfReturns { + return nil, fmt.Errorf("too many return values") + } + + hasError = m.Type.NumOut() == maxNumOfReturns + if hasError { + if m.Type.Out(maxNumOfReturns-1) != errorType { + return nil, fmt.Errorf(`must have "error" as its last return value`) + } + } + } + + fe := &Field{ + FieldDefinition: *f, + TypeName: typeName, + MethodIndex: methodIndex, + FieldIndex: fieldIndex, + HasContext: hasContext, + ArgsPacker: argsPacker, + HasError: hasError, + TraceLabel: fmt.Sprintf("GraphQL field: %s.%s", typeName, f.Name), + } + + var out reflect.Type + if methodIndex != -1 { + out = m.Type.Out(0) + sub, ok := b.schema.EntryPoints["subscription"] + if ok && typeName == sub.TypeName() && out.Kind() == reflect.Chan { + out = m.Type.Out(0).Elem() + } + } else { + out = sf.Type + } + if err := b.assignExec(&fe.ValueExec, f.Type, out); err != nil { + return nil, err + } + + return fe, nil +} + +func findMethod(t reflect.Type, name string) int { + for i := 0; i < t.NumMethod(); i++ { + if strings.EqualFold(stripUnderscore(name), stripUnderscore(t.Method(i).Name)) { + return i + } + } + return -1 +} + +func findField(t reflect.Type, name string, index []int) []int { + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + + if field.Type.Kind() == reflect.Struct && field.Anonymous { + newIndex := findField(field.Type, name, []int{i}) + if len(newIndex) > 1 { + return append(index, newIndex...) + } + } + + if strings.EqualFold(stripUnderscore(name), stripUnderscore(field.Name)) { + return append(index, i) + } + } + + return index +} + +// fieldCount helps resolve ambiguity when more than one embedded struct contains fields with the same name. 
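+// Counts are keyed by the lowercased, underscore-stripped field name; anonymous embedded structs are traversed so their fields count against the embedding struct.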
+func fieldCount(t reflect.Type, count map[string]int) map[string]int { + if t.Kind() != reflect.Struct { + return nil + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + fieldName := strings.ToLower(stripUnderscore(field.Name)) + + if field.Type.Kind() == reflect.Struct && field.Anonymous { + count = fieldCount(field.Type, count) + } else { + if _, ok := count[fieldName]; !ok { + count[fieldName] = 0 + } + count[fieldName]++ + } + } + + return count +} + +func unwrapNonNull(t types.Type) (types.Type, bool) { + if nn, ok := t.(*types.NonNull); ok { + return nn.OfType, true + } + return t, false +} + +func stripUnderscore(s string) string { + return strings.Replace(s, "_", "", -1) +} + +func unwrapPtr(t reflect.Type) reflect.Type { + if t.Kind() == reflect.Ptr { + return t.Elem() + } + return t +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/selected/selected.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/selected/selected.go new file mode 100644 index 000000000..9b96d2b62 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/exec/selected/selected.go @@ -0,0 +1,269 @@ +package selected + +import ( + "fmt" + "reflect" + "sync" + + "github.com/graph-gophers/graphql-go/errors" + "github.com/graph-gophers/graphql-go/internal/exec/packer" + "github.com/graph-gophers/graphql-go/internal/exec/resolvable" + "github.com/graph-gophers/graphql-go/internal/query" + "github.com/graph-gophers/graphql-go/introspection" + "github.com/graph-gophers/graphql-go/types" +) + +type Request struct { + Schema *types.Schema + Doc *types.ExecutableDefinition + Vars map[string]interface{} + Mu sync.Mutex + Errs []*errors.QueryError + DisableIntrospection bool +} + +func (r *Request) AddError(err *errors.QueryError) { + r.Mu.Lock() + r.Errs = append(r.Errs, err) + r.Mu.Unlock() +} + +func ApplyOperation(r *Request, s *resolvable.Schema, op *types.OperationDefinition) []Selection { + var obj *resolvable.Object + switch op.Type { + case query.Query: + obj = s.Query.(*resolvable.Object) + case query.Mutation: + obj = s.Mutation.(*resolvable.Object) + case query.Subscription: + obj = s.Subscription.(*resolvable.Object) + } + return applySelectionSet(r, s, obj, op.Selections) +} + +type Selection interface { + isSelection() +} + +type SchemaField struct { + resolvable.Field + Alias string + Args map[string]interface{} + PackedArgs reflect.Value + Sels []Selection + Async bool + FixedResult reflect.Value +} + +type TypeAssertion struct { + resolvable.TypeAssertion + Sels []Selection +} + +type TypenameField struct { + resolvable.Object + Alias string +} + +func (*SchemaField) isSelection() {} +func (*TypeAssertion) isSelection() {} +func (*TypenameField) isSelection() {} + +func applySelectionSet(r *Request, s *resolvable.Schema, e *resolvable.Object, sels []types.Selection) (flattenedSels []Selection) { + for _, sel := range sels { + switch sel := sel.(type) { + case *types.Field: + field := sel + if skipByDirective(r, field.Directives) { + continue + } + + switch field.Name.Name { + case "__typename": + // __typename is available even though r.DisableIntrospection == true + // because it is necessary when using union types and interfaces: https://graphql.org/learn/schema/#union-types + flattenedSels = append(flattenedSels, &TypenameField{ + Object: *e, + Alias: field.Alias.Name, + }) + + case "__schema": + if !r.DisableIntrospection { + flattenedSels = append(flattenedSels, &SchemaField{ + Field: s.Meta.FieldSchema, + Alias: field.Alias.Name, + 
Sels: applySelectionSet(r, s, s.Meta.Schema, field.SelectionSet), + Async: true, + FixedResult: reflect.ValueOf(introspection.WrapSchema(r.Schema)), + }) + } + + case "__type": + if !r.DisableIntrospection { + p := packer.ValuePacker{ValueType: reflect.TypeOf("")} + v, err := p.Pack(field.Arguments.MustGet("name").Deserialize(r.Vars)) + if err != nil { + r.AddError(errors.Errorf("%s", err)) + return nil + } + + t, ok := r.Schema.Types[v.String()] + if !ok { + return nil + } + + flattenedSels = append(flattenedSels, &SchemaField{ + Field: s.Meta.FieldType, + Alias: field.Alias.Name, + Sels: applySelectionSet(r, s, s.Meta.Type, field.SelectionSet), + Async: true, + FixedResult: reflect.ValueOf(introspection.WrapType(t)), + }) + } + + default: + fe := e.Fields[field.Name.Name] + + var args map[string]interface{} + var packedArgs reflect.Value + if fe.ArgsPacker != nil { + args = make(map[string]interface{}) + for _, arg := range field.Arguments { + args[arg.Name.Name] = arg.Value.Deserialize(r.Vars) + } + var err error + packedArgs, err = fe.ArgsPacker.Pack(args) + if err != nil { + r.AddError(errors.Errorf("%s", err)) + return + } + } + + fieldSels := applyField(r, s, fe.ValueExec, field.SelectionSet) + flattenedSels = append(flattenedSels, &SchemaField{ + Field: *fe, + Alias: field.Alias.Name, + Args: args, + PackedArgs: packedArgs, + Sels: fieldSels, + Async: fe.HasContext || fe.ArgsPacker != nil || fe.HasError || HasAsyncSel(fieldSels), + }) + } + + case *types.InlineFragment: + frag := sel + if skipByDirective(r, frag.Directives) { + continue + } + flattenedSels = append(flattenedSels, applyFragment(r, s, e, &frag.Fragment)...) + + case *types.FragmentSpread: + spread := sel + if skipByDirective(r, spread.Directives) { + continue + } + flattenedSels = append(flattenedSels, applyFragment(r, s, e, &r.Doc.Fragments.Get(spread.Name.Name).Fragment)...) 
+ + default: + panic("invalid type") + } + } + return +} + +func applyFragment(r *Request, s *resolvable.Schema, e *resolvable.Object, frag *types.Fragment) []Selection { + if frag.On.Name != e.Name { + t := r.Schema.Resolve(frag.On.Name) + face, ok := t.(*types.InterfaceTypeDefinition) + if !ok && frag.On.Name != "" { + a, ok2 := e.TypeAssertions[frag.On.Name] + if !ok2 { + panic(fmt.Errorf("%q does not implement %q", frag.On, e.Name)) // TODO proper error handling + } + + return []Selection{&TypeAssertion{ + TypeAssertion: *a, + Sels: applySelectionSet(r, s, a.TypeExec.(*resolvable.Object), frag.Selections), + }} + } + if ok && len(face.PossibleTypes) > 0 { + sels := []Selection{} + for _, t := range face.PossibleTypes { + if t.Name == e.Name { + return applySelectionSet(r, s, e, frag.Selections) + } + + if a, ok := e.TypeAssertions[t.Name]; ok { + sels = append(sels, &TypeAssertion{ + TypeAssertion: *a, + Sels: applySelectionSet(r, s, a.TypeExec.(*resolvable.Object), frag.Selections), + }) + } + } + if len(sels) == 0 { + panic(fmt.Errorf("%q does not implement %q", e.Name, frag.On)) // TODO proper error handling + } + return sels + } + } + return applySelectionSet(r, s, e, frag.Selections) +} + +func applyField(r *Request, s *resolvable.Schema, e resolvable.Resolvable, sels []types.Selection) []Selection { + switch e := e.(type) { + case *resolvable.Object: + return applySelectionSet(r, s, e, sels) + case *resolvable.List: + return applyField(r, s, e.Elem, sels) + case *resolvable.Scalar: + return nil + default: + panic("unreachable") + } +} + +func skipByDirective(r *Request, directives types.DirectiveList) bool { + if d := directives.Get("skip"); d != nil { + p := packer.ValuePacker{ValueType: reflect.TypeOf(false)} + v, err := p.Pack(d.Arguments.MustGet("if").Deserialize(r.Vars)) + if err != nil { + r.AddError(errors.Errorf("%s", err)) + } + if err == nil && v.Bool() { + return true + } + } + + if d := directives.Get("include"); d != nil { + p := packer.ValuePacker{ValueType: reflect.TypeOf(false)} + v, err := p.Pack(d.Arguments.MustGet("if").Deserialize(r.Vars)) + if err != nil { + r.AddError(errors.Errorf("%s", err)) + } + if err == nil && !v.Bool() { + return true + } + } + + return false +} + +func HasAsyncSel(sels []Selection) bool { + for _, sel := range sels { + switch sel := sel.(type) { + case *SchemaField: + if sel.Async { + return true + } + case *TypeAssertion: + if HasAsyncSel(sel.Sels) { + return true + } + case *TypenameField: + // sync + default: + panic("unreachable") + } + } + return false +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/exec/subscribe.go b/vendor/github.com/graph-gophers/graphql-go/internal/exec/subscribe.go new file mode 100644 index 000000000..37ebacbc9 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/exec/subscribe.go @@ -0,0 +1,179 @@ +package exec + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "reflect" + "time" + + "github.com/graph-gophers/graphql-go/errors" + "github.com/graph-gophers/graphql-go/internal/exec/resolvable" + "github.com/graph-gophers/graphql-go/internal/exec/selected" + "github.com/graph-gophers/graphql-go/types" +) + +type Response struct { + Data json.RawMessage + Errors []*errors.QueryError +} + +func (r *Request) Subscribe(ctx context.Context, s *resolvable.Schema, op *types.OperationDefinition) <-chan *Response { + var result reflect.Value + var f *fieldToExec + var err *errors.QueryError + func() { + defer r.handlePanic(ctx) + + sels := 
selected.ApplyOperation(&r.Request, s, op) + var fields []*fieldToExec + collectFieldsToResolve(sels, s, s.Resolver, &fields, make(map[string]*fieldToExec)) + + // TODO: move this check into validation.Validate + if len(fields) != 1 { + err = errors.Errorf("%s", "can subscribe to at most one subscription at a time") + return + } + f = fields[0] + + var in []reflect.Value + if f.field.HasContext { + in = append(in, reflect.ValueOf(ctx)) + } + if f.field.ArgsPacker != nil { + in = append(in, f.field.PackedArgs) + } + callOut := f.resolver.Method(f.field.MethodIndex).Call(in) + result = callOut[0] + + if f.field.HasError && !callOut[1].IsNil() { + switch resolverErr := callOut[1].Interface().(type) { + case *errors.QueryError: + err = resolverErr + case error: + err = errors.Errorf("%s", resolverErr) + err.ResolverError = resolverErr + default: + panic(fmt.Errorf("can only deal with *QueryError and error types, got %T", resolverErr)) + } + } + }() + + // Handles the case where the locally executed func above panicked + if len(r.Request.Errs) > 0 { + return sendAndReturnClosed(&Response{Errors: r.Request.Errs}) + } + + if f == nil { + return sendAndReturnClosed(&Response{Errors: []*errors.QueryError{err}}) + } + + if err != nil { + if _, nonNullChild := f.field.Type.(*types.NonNull); nonNullChild { + return sendAndReturnClosed(&Response{Errors: []*errors.QueryError{err}}) + } + return sendAndReturnClosed(&Response{Data: []byte(fmt.Sprintf(`{"%s":null}`, f.field.Alias)), Errors: []*errors.QueryError{err}}) + } + + if ctxErr := ctx.Err(); ctxErr != nil { + return sendAndReturnClosed(&Response{Errors: []*errors.QueryError{errors.Errorf("%s", ctxErr)}}) + } + + c := make(chan *Response) + // TODO: handle resolver nil channel better? + if result.IsZero() { + close(c) + return c + } + + go func() { + for { + // Check subscription context + chosen, resp, ok := reflect.Select([]reflect.SelectCase{ + { + Dir: reflect.SelectRecv, + Chan: reflect.ValueOf(ctx.Done()), + }, + { + Dir: reflect.SelectRecv, + Chan: result, + }, + }) + switch chosen { + // subscription context done + case 0: + close(c) + return + // upstream received + case 1: + // upstream closed + if !ok { + close(c) + return + } + + subR := &Request{ + Request: selected.Request{ + Doc: r.Request.Doc, + Vars: r.Request.Vars, + Schema: r.Request.Schema, + }, + Limiter: r.Limiter, + Tracer: r.Tracer, + Logger: r.Logger, + } + var out bytes.Buffer + func() { + timeout := r.SubscribeResolverTimeout + if timeout == 0 { + timeout = time.Second + } + + subCtx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() + + // resolve response + func() { + defer subR.handlePanic(subCtx) + + var buf bytes.Buffer + subR.execSelectionSet(subCtx, f.sels, f.field.Type, &pathSegment{nil, f.field.Alias}, s, resp, &buf) + + propagateChildError := false + if _, nonNullChild := f.field.Type.(*types.NonNull); nonNullChild && resolvedToNull(&buf) { + propagateChildError = true + } + + if !propagateChildError { + out.WriteString(fmt.Sprintf(`{"%s":`, f.field.Alias)) + out.Write(buf.Bytes()) + out.WriteString(`}`) + } + }() + + if err := subCtx.Err(); err != nil { + c <- &Response{Errors: []*errors.QueryError{errors.Errorf("%s", err)}} + return + } + + // Send response within timeout + // TODO: maybe block until sent? 
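+ // If subCtx expires before the consumer receives, this event's response is dropped instead of blocking the goroutine.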
+ select { + case <-subCtx.Done(): + case c <- &Response{Data: out.Bytes(), Errors: subR.Errs}: + } + }() + } + } + }() + + return c +} + +func sendAndReturnClosed(resp *Response) chan *Response { + c := make(chan *Response, 1) + c <- resp + close(c) + return c +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/query/query.go b/vendor/github.com/graph-gophers/graphql-go/internal/query/query.go new file mode 100644 index 000000000..ca0400cdf --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/query/query.go @@ -0,0 +1,156 @@ +package query + +import ( + "fmt" + "text/scanner" + + "github.com/graph-gophers/graphql-go/errors" + "github.com/graph-gophers/graphql-go/internal/common" + "github.com/graph-gophers/graphql-go/types" +) + +const ( + Query types.OperationType = "QUERY" + Mutation types.OperationType = "MUTATION" + Subscription types.OperationType = "SUBSCRIPTION" +) + +func Parse(queryString string) (*types.ExecutableDefinition, *errors.QueryError) { + l := common.NewLexer(queryString, false) + + var execDef *types.ExecutableDefinition + err := l.CatchSyntaxError(func() { execDef = parseExecutableDefinition(l) }) + if err != nil { + return nil, err + } + + return execDef, nil +} + +func parseExecutableDefinition(l *common.Lexer) *types.ExecutableDefinition { + ed := &types.ExecutableDefinition{} + l.ConsumeWhitespace() + for l.Peek() != scanner.EOF { + if l.Peek() == '{' { + op := &types.OperationDefinition{Type: Query, Loc: l.Location()} + op.Selections = parseSelectionSet(l) + ed.Operations = append(ed.Operations, op) + continue + } + + loc := l.Location() + switch x := l.ConsumeIdent(); x { + case "query": + op := parseOperation(l, Query) + op.Loc = loc + ed.Operations = append(ed.Operations, op) + + case "mutation": + ed.Operations = append(ed.Operations, parseOperation(l, Mutation)) + + case "subscription": + ed.Operations = append(ed.Operations, parseOperation(l, Subscription)) + + case "fragment": + frag := parseFragment(l) + frag.Loc = loc + ed.Fragments = append(ed.Fragments, frag) + + default: + l.SyntaxError(fmt.Sprintf(`unexpected %q, expecting "fragment"`, x)) + } + } + return ed +} + +func parseOperation(l *common.Lexer, opType types.OperationType) *types.OperationDefinition { + op := &types.OperationDefinition{Type: opType} + op.Name.Loc = l.Location() + if l.Peek() == scanner.Ident { + op.Name = l.ConsumeIdentWithLoc() + } + op.Directives = common.ParseDirectives(l) + if l.Peek() == '(' { + l.ConsumeToken('(') + for l.Peek() != ')' { + loc := l.Location() + l.ConsumeToken('$') + iv := common.ParseInputValue(l) + iv.Loc = loc + op.Vars = append(op.Vars, iv) + } + l.ConsumeToken(')') + } + op.Selections = parseSelectionSet(l) + return op +} + +func parseFragment(l *common.Lexer) *types.FragmentDefinition { + f := &types.FragmentDefinition{} + f.Name = l.ConsumeIdentWithLoc() + l.ConsumeKeyword("on") + f.On = types.TypeName{Ident: l.ConsumeIdentWithLoc()} + f.Directives = common.ParseDirectives(l) + f.Selections = parseSelectionSet(l) + return f +} + +func parseSelectionSet(l *common.Lexer) []types.Selection { + var sels []types.Selection + l.ConsumeToken('{') + for l.Peek() != '}' { + sels = append(sels, parseSelection(l)) + } + l.ConsumeToken('}') + return sels +} + +func parseSelection(l *common.Lexer) types.Selection { + if l.Peek() == '.' 
{ + return parseSpread(l) + } + return parseFieldDef(l) +} + +func parseFieldDef(l *common.Lexer) *types.Field { + f := &types.Field{} + f.Alias = l.ConsumeIdentWithLoc() + f.Name = f.Alias + if l.Peek() == ':' { + l.ConsumeToken(':') + f.Name = l.ConsumeIdentWithLoc() + } + if l.Peek() == '(' { + f.Arguments = common.ParseArgumentList(l) + } + f.Directives = common.ParseDirectives(l) + if l.Peek() == '{' { + f.SelectionSetLoc = l.Location() + f.SelectionSet = parseSelectionSet(l) + } + return f +} + +func parseSpread(l *common.Lexer) types.Selection { + loc := l.Location() + l.ConsumeToken('.') + l.ConsumeToken('.') + l.ConsumeToken('.') + + f := &types.InlineFragment{Loc: loc} + if l.Peek() == scanner.Ident { + ident := l.ConsumeIdentWithLoc() + if ident.Name != "on" { + fs := &types.FragmentSpread{ + Name: ident, + Loc: loc, + } + fs.Directives = common.ParseDirectives(l) + return fs + } + f.On = types.TypeName{Ident: l.ConsumeIdentWithLoc()} + } + f.Directives = common.ParseDirectives(l) + f.Selections = parseSelectionSet(l) + return f +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/schema/meta.go b/vendor/github.com/graph-gophers/graphql-go/internal/schema/meta.go new file mode 100644 index 000000000..9f5bba56d --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/schema/meta.go @@ -0,0 +1,203 @@ +package schema + +import ( + "github.com/graph-gophers/graphql-go/types" +) + +func init() { + _ = newMeta() +} + +// newMeta initializes an instance of the meta Schema. +func newMeta() *types.Schema { + s := &types.Schema{ + EntryPointNames: make(map[string]string), + Types: make(map[string]types.NamedType), + Directives: make(map[string]*types.DirectiveDefinition), + } + + err := Parse(s, metaSrc, false) + if err != nil { + panic(err) + } + return s +} + +var metaSrc = ` + # The ` + "`" + `Int` + "`" + ` scalar type represents non-fractional signed whole numeric values. Int can represent values between -(2^31) and 2^31 - 1. + scalar Int + + # The ` + "`" + `Float` + "`" + ` scalar type represents signed double-precision fractional values as specified by [IEEE 754](http://en.wikipedia.org/wiki/IEEE_floating_point). + scalar Float + + # The ` + "`" + `String` + "`" + ` scalar type represents textual data, represented as UTF-8 character sequences. The String type is most often used by GraphQL to represent free-form human-readable text. + scalar String + + # The ` + "`" + `Boolean` + "`" + ` scalar type represents ` + "`" + `true` + "`" + ` or ` + "`" + `false` + "`" + `. + scalar Boolean + + # The ` + "`" + `ID` + "`" + ` scalar type represents a unique identifier, often used to refetch an object or as key for a cache. The ID type appears in a JSON response as a String; however, it is not intended to be human-readable. When expected as an input type, any string (such as ` + "`" + `"4"` + "`" + `) or integer (such as ` + "`" + `4` + "`" + `) input value will be accepted as an ID. + scalar ID + + # Directs the executor to include this field or fragment only when the ` + "`" + `if` + "`" + ` argument is true. + directive @include( + # Included when true. + if: Boolean! + ) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT + + # Directs the executor to skip this field or fragment when the ` + "`" + `if` + "`" + ` argument is true. + directive @skip( + # Skipped when true. + if: Boolean! + ) on FIELD | FRAGMENT_SPREAD | INLINE_FRAGMENT + + # Marks an element of a GraphQL schema as no longer supported. 
+ directive @deprecated( + # Explains why this element was deprecated, usually also including a suggestion + # for how to access supported similar data. Formatted in + # [Markdown](https://daringfireball.net/projects/markdown/). + reason: String = "No longer supported" + ) on FIELD_DEFINITION | ENUM_VALUE + + # A Directive provides a way to describe alternate runtime execution and type validation behavior in a GraphQL document. + # + # In some cases, you need to provide options to alter GraphQL's execution behavior + # in ways field arguments will not suffice, such as conditionally including or + # skipping a field. Directives provide this by describing additional information + # to the executor. + type __Directive { + name: String! + description: String + locations: [__DirectiveLocation!]! + args: [__InputValue!]! + } + + # A Directive can be adjacent to many parts of the GraphQL language, a + # __DirectiveLocation describes one such possible adjacencies. + enum __DirectiveLocation { + # Location adjacent to a query operation. + QUERY + # Location adjacent to a mutation operation. + MUTATION + # Location adjacent to a subscription operation. + SUBSCRIPTION + # Location adjacent to a field. + FIELD + # Location adjacent to a fragment definition. + FRAGMENT_DEFINITION + # Location adjacent to a fragment spread. + FRAGMENT_SPREAD + # Location adjacent to an inline fragment. + INLINE_FRAGMENT + # Location adjacent to a schema definition. + SCHEMA + # Location adjacent to a scalar definition. + SCALAR + # Location adjacent to an object type definition. + OBJECT + # Location adjacent to a field definition. + FIELD_DEFINITION + # Location adjacent to an argument definition. + ARGUMENT_DEFINITION + # Location adjacent to an interface definition. + INTERFACE + # Location adjacent to a union definition. + UNION + # Location adjacent to an enum definition. + ENUM + # Location adjacent to an enum value definition. + ENUM_VALUE + # Location adjacent to an input object type definition. + INPUT_OBJECT + # Location adjacent to an input object field definition. + INPUT_FIELD_DEFINITION + } + + # One possible value for a given Enum. Enum values are unique values, not a + # placeholder for a string or numeric value. However an Enum value is returned in + # a JSON response as a string. + type __EnumValue { + name: String! + description: String + isDeprecated: Boolean! + deprecationReason: String + } + + # Object and Interface types are described by a list of Fields, each of which has + # a name, potentially a list of arguments, and a return type. + type __Field { + name: String! + description: String + args: [__InputValue!]! + type: __Type! + isDeprecated: Boolean! + deprecationReason: String + } + + # Arguments provided to Fields or Directives and the input fields of an + # InputObject are represented as Input Values which describe their type and + # optionally a default value. + type __InputValue { + name: String! + description: String + type: __Type! + # A GraphQL-formatted string representing the default value for this input value. + defaultValue: String + } + + # A GraphQL Schema defines the capabilities of a GraphQL server. It exposes all + # available types and directives on the server, as well as the entry points for + # query, mutation, and subscription operations. + type __Schema { + # A list of all types supported by this server. + types: [__Type!]! + # The type that query operations will be rooted at. + queryType: __Type! 
+ # If this server supports mutation, the type that mutation operations will be rooted at. + mutationType: __Type + # If this server support subscription, the type that subscription operations will be rooted at. + subscriptionType: __Type + # A list of all directives supported by this server. + directives: [__Directive!]! + } + + # The fundamental unit of any GraphQL Schema is the type. There are many kinds of + # types in GraphQL as represented by the ` + "`" + `__TypeKind` + "`" + ` enum. + # + # Depending on the kind of a type, certain fields describe information about that + # type. Scalar types provide no information beyond a name and description, while + # Enum types provide their values. Object and Interface types provide the fields + # they describe. Abstract types, Union and Interface, provide the Object types + # possible at runtime. List and NonNull types compose other types. + type __Type { + kind: __TypeKind! + name: String + description: String + fields(includeDeprecated: Boolean = false): [__Field!] + interfaces: [__Type!] + possibleTypes: [__Type!] + enumValues(includeDeprecated: Boolean = false): [__EnumValue!] + inputFields: [__InputValue!] + ofType: __Type + } + + # An enum describing what kind of type a given ` + "`" + `__Type` + "`" + ` is. + enum __TypeKind { + # Indicates this type is a scalar. + SCALAR + # Indicates this type is an object. ` + "`" + `fields` + "`" + ` and ` + "`" + `interfaces` + "`" + ` are valid fields. + OBJECT + # Indicates this type is an interface. ` + "`" + `fields` + "`" + ` and ` + "`" + `possibleTypes` + "`" + ` are valid fields. + INTERFACE + # Indicates this type is a union. ` + "`" + `possibleTypes` + "`" + ` is a valid field. + UNION + # Indicates this type is an enum. ` + "`" + `enumValues` + "`" + ` is a valid field. + ENUM + # Indicates this type is an input object. ` + "`" + `inputFields` + "`" + ` is a valid field. + INPUT_OBJECT + # Indicates this type is a list. ` + "`" + `ofType` + "`" + ` is a valid field. + LIST + # Indicates this type is a non-null. ` + "`" + `ofType` + "`" + ` is a valid field. + NON_NULL + } +` diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/schema/schema.go b/vendor/github.com/graph-gophers/graphql-go/internal/schema/schema.go new file mode 100644 index 000000000..fb301c46b --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/schema/schema.go @@ -0,0 +1,586 @@ +package schema + +import ( + "fmt" + "text/scanner" + + "github.com/graph-gophers/graphql-go/errors" + "github.com/graph-gophers/graphql-go/internal/common" + "github.com/graph-gophers/graphql-go/types" +) + +// New initializes an instance of Schema. 
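+// The returned Schema is seeded with the built-in scalar and introspection types and the standard directives from newMeta, so parsed user definitions can reference them.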
+func New() *types.Schema { + s := &types.Schema{ + EntryPointNames: make(map[string]string), + Types: make(map[string]types.NamedType), + Directives: make(map[string]*types.DirectiveDefinition), + } + m := newMeta() + for n, t := range m.Types { + s.Types[n] = t + } + for n, d := range m.Directives { + s.Directives[n] = d + } + return s +} + +func Parse(s *types.Schema, schemaString string, useStringDescriptions bool) error { + l := common.NewLexer(schemaString, useStringDescriptions) + err := l.CatchSyntaxError(func() { parseSchema(s, l) }) + if err != nil { + return err + } + + if err := mergeExtensions(s); err != nil { + return err + } + + for _, t := range s.Types { + if err := resolveNamedType(s, t); err != nil { + return err + } + } + for _, d := range s.Directives { + for _, arg := range d.Arguments { + t, err := common.ResolveType(arg.Type, s.Resolve) + if err != nil { + return err + } + arg.Type = t + } + } + + // https://graphql.github.io/graphql-spec/June2018/#sec-Root-Operation-Types + // > While any type can be the root operation type for a GraphQL operation, the type system definition language can + // > omit the schema definition when the query, mutation, and subscription root types are named Query, Mutation, + // > and Subscription respectively. + if len(s.EntryPointNames) == 0 { + if _, ok := s.Types["Query"]; ok { + s.EntryPointNames["query"] = "Query" + } + if _, ok := s.Types["Mutation"]; ok { + s.EntryPointNames["mutation"] = "Mutation" + } + if _, ok := s.Types["Subscription"]; ok { + s.EntryPointNames["subscription"] = "Subscription" + } + } + s.EntryPoints = make(map[string]types.NamedType) + for key, name := range s.EntryPointNames { + t, ok := s.Types[name] + if !ok { + return errors.Errorf("type %q not found", name) + } + s.EntryPoints[key] = t + } + + // Interface types need validation: https://spec.graphql.org/draft/#sec-Interfaces.Interfaces-Implementing-Interfaces + for _, typeDef := range s.Types { + switch t := typeDef.(type) { + case *types.InterfaceTypeDefinition: + for i, implements := range t.Interfaces { + typ, ok := s.Types[implements.Name] + if !ok { + return errors.Errorf("interface %q not found", implements) + } + inteface, ok := typ.(*types.InterfaceTypeDefinition) + if !ok { + return errors.Errorf("type %q is not an interface", inteface) + } + + for _, f := range inteface.Fields.Names() { + if t.Fields.Get(f) == nil { + return errors.Errorf("interface %q expects field %q but %q does not provide it", inteface.Name, f, t.Name) + } + } + + t.Interfaces[i] = inteface + } + default: + continue + } + } + + for _, obj := range s.Objects { + obj.Interfaces = make([]*types.InterfaceTypeDefinition, len(obj.InterfaceNames)) + if err := resolveDirectives(s, obj.Directives, "OBJECT"); err != nil { + return err + } + for _, field := range obj.Fields { + if err := resolveDirectives(s, field.Directives, "FIELD_DEFINITION"); err != nil { + return err + } + } + for i, intfName := range obj.InterfaceNames { + t, ok := s.Types[intfName] + if !ok { + return errors.Errorf("interface %q not found", intfName) + } + intf, ok := t.(*types.InterfaceTypeDefinition) + if !ok { + return errors.Errorf("type %q is not an interface", intfName) + } + for _, f := range intf.Fields.Names() { + if obj.Fields.Get(f) == nil { + return errors.Errorf("interface %q expects field %q but %q does not provide it", intfName, f, obj.Name) + } + } + obj.Interfaces[i] = intf + intf.PossibleTypes = append(intf.PossibleTypes, obj) + } + } + + for _, union := range s.Unions { + if err := 
resolveDirectives(s, union.Directives, "UNION"); err != nil { + return err + } + union.UnionMemberTypes = make([]*types.ObjectTypeDefinition, len(union.TypeNames)) + for i, name := range union.TypeNames { + t, ok := s.Types[name] + if !ok { + return errors.Errorf("object type %q not found", name) + } + obj, ok := t.(*types.ObjectTypeDefinition) + if !ok { + return errors.Errorf("type %q is not an object", name) + } + union.UnionMemberTypes[i] = obj + } + } + + for _, enum := range s.Enums { + if err := resolveDirectives(s, enum.Directives, "ENUM"); err != nil { + return err + } + for _, value := range enum.EnumValuesDefinition { + if err := resolveDirectives(s, value.Directives, "ENUM_VALUE"); err != nil { + return err + } + } + } + + return nil +} + +func ParseSchema(schemaString string, useStringDescriptions bool) (*types.Schema, error) { + s := New() + err := Parse(s, schemaString, useStringDescriptions) + return s, err +} + +func mergeExtensions(s *types.Schema) error { + for _, ext := range s.Extensions { + typ := s.Types[ext.Type.TypeName()] + if typ == nil { + return fmt.Errorf("trying to extend unknown type %q", ext.Type.TypeName()) + } + + if typ.Kind() != ext.Type.Kind() { + return fmt.Errorf("trying to extend type %q with type %q", typ.Kind(), ext.Type.Kind()) + } + + switch og := typ.(type) { + case *types.ObjectTypeDefinition: + e := ext.Type.(*types.ObjectTypeDefinition) + + for _, field := range e.Fields { + if og.Fields.Get(field.Name) != nil { + return fmt.Errorf("extended field %q already exists", field.Name) + } + } + og.Fields = append(og.Fields, e.Fields...) + + for _, en := range e.InterfaceNames { + for _, on := range og.InterfaceNames { + if on == en { + return fmt.Errorf("interface %q implemented in the extension is already implemented in %q", on, og.Name) + } + } + } + og.InterfaceNames = append(og.InterfaceNames, e.InterfaceNames...) + + case *types.InputObject: + e := ext.Type.(*types.InputObject) + + for _, field := range e.Values { + if og.Values.Get(field.Name.Name) != nil { + return fmt.Errorf("extended field %q already exists", field.Name) + } + } + og.Values = append(og.Values, e.Values...) + + case *types.InterfaceTypeDefinition: + e := ext.Type.(*types.InterfaceTypeDefinition) + + for _, field := range e.Fields { + if og.Fields.Get(field.Name) != nil { + return fmt.Errorf("extended field %s already exists", field.Name) + } + } + og.Fields = append(og.Fields, e.Fields...) + + case *types.Union: + e := ext.Type.(*types.Union) + + for _, en := range e.TypeNames { + for _, on := range og.TypeNames { + if on == en { + return fmt.Errorf("union type %q already declared in %q", on, og.Name) + } + } + } + og.TypeNames = append(og.TypeNames, e.TypeNames...) + + case *types.EnumTypeDefinition: + e := ext.Type.(*types.EnumTypeDefinition) + + for _, en := range e.EnumValuesDefinition { + for _, on := range og.EnumValuesDefinition { + if on.EnumValue == en.EnumValue { + return fmt.Errorf("enum value %q already declared in %q", on.EnumValue, og.Name) + } + } + } + og.EnumValuesDefinition = append(og.EnumValuesDefinition, e.EnumValuesDefinition...) 
+ default: + return fmt.Errorf(`unexpected %q, expecting "schema", "type", "enum", "interface", "union" or "input"`, og.TypeName()) + } + } + + return nil +} + +func resolveNamedType(s *types.Schema, t types.NamedType) error { + switch t := t.(type) { + case *types.ObjectTypeDefinition: + for _, f := range t.Fields { + if err := resolveField(s, f); err != nil { + return err + } + } + case *types.InterfaceTypeDefinition: + for _, f := range t.Fields { + if err := resolveField(s, f); err != nil { + return err + } + } + case *types.InputObject: + if err := resolveInputObject(s, t.Values); err != nil { + return err + } + } + return nil +} + +func resolveField(s *types.Schema, f *types.FieldDefinition) error { + t, err := common.ResolveType(f.Type, s.Resolve) + if err != nil { + return err + } + f.Type = t + if err := resolveDirectives(s, f.Directives, "FIELD_DEFINITION"); err != nil { + return err + } + return resolveInputObject(s, f.Arguments) +} + +func resolveDirectives(s *types.Schema, directives types.DirectiveList, loc string) error { + for _, d := range directives { + dirName := d.Name.Name + dd, ok := s.Directives[dirName] + if !ok { + return errors.Errorf("directive %q not found", dirName) + } + validLoc := false + for _, l := range dd.Locations { + if l == loc { + validLoc = true + break + } + } + if !validLoc { + return errors.Errorf("invalid location %q for directive %q (must be one of %v)", loc, dirName, dd.Locations) + } + for _, arg := range d.Arguments { + if dd.Arguments.Get(arg.Name.Name) == nil { + return errors.Errorf("invalid argument %q for directive %q", arg.Name.Name, dirName) + } + } + for _, arg := range dd.Arguments { + if _, ok := d.Arguments.Get(arg.Name.Name); !ok { + d.Arguments = append(d.Arguments, &types.Argument{Name: arg.Name, Value: arg.Default}) + } + } + } + return nil +} + +func resolveInputObject(s *types.Schema, values types.ArgumentsDefinition) error { + for _, v := range values { + t, err := common.ResolveType(v.Type, s.Resolve) + if err != nil { + return err + } + v.Type = t + } + return nil +} + +func parseSchema(s *types.Schema, l *common.Lexer) { + l.ConsumeWhitespace() + + for l.Peek() != scanner.EOF { + desc := l.DescComment() + switch x := l.ConsumeIdent(); x { + + case "schema": + l.ConsumeToken('{') + for l.Peek() != '}' { + + name := l.ConsumeIdent() + l.ConsumeToken(':') + typ := l.ConsumeIdent() + s.EntryPointNames[name] = typ + } + l.ConsumeToken('}') + + case "type": + obj := parseObjectDef(l) + obj.Desc = desc + s.Types[obj.Name] = obj + s.Objects = append(s.Objects, obj) + + case "interface": + iface := parseInterfaceDef(l) + iface.Desc = desc + s.Types[iface.Name] = iface + + case "union": + union := parseUnionDef(l) + union.Desc = desc + s.Types[union.Name] = union + s.Unions = append(s.Unions, union) + + case "enum": + enum := parseEnumDef(l) + enum.Desc = desc + s.Types[enum.Name] = enum + s.Enums = append(s.Enums, enum) + + case "input": + input := parseInputDef(l) + input.Desc = desc + s.Types[input.Name] = input + + case "scalar": + loc := l.Location() + name := l.ConsumeIdent() + directives := common.ParseDirectives(l) + s.Types[name] = &types.ScalarTypeDefinition{Name: name, Desc: desc, Directives: directives, Loc: loc} + + case "directive": + directive := parseDirectiveDef(l) + directive.Desc = desc + s.Directives[directive.Name] = directive + + case "extend": + parseExtension(s, l) + + default: + // TODO: Add support for type extensions. 
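+ // Any other top-level keyword is rejected with a syntax error listing the accepted definition kinds.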
+ l.SyntaxError(fmt.Sprintf(`unexpected %q, expecting "schema", "type", "enum", "interface", "union", "input", "scalar" or "directive"`, x)) + } + } +} + +func parseObjectDef(l *common.Lexer) *types.ObjectTypeDefinition { + object := &types.ObjectTypeDefinition{Loc: l.Location(), Name: l.ConsumeIdent()} + + for { + if l.Peek() == '{' { + break + } + + if l.Peek() == '@' { + object.Directives = common.ParseDirectives(l) + continue + } + + if l.Peek() == scanner.Ident { + l.ConsumeKeyword("implements") + + for l.Peek() != '{' && l.Peek() != '@' { + if l.Peek() == '&' { + l.ConsumeToken('&') + } + + object.InterfaceNames = append(object.InterfaceNames, l.ConsumeIdent()) + } + continue + } + + } + l.ConsumeToken('{') + object.Fields = parseFieldsDef(l) + l.ConsumeToken('}') + + return object + +} + +func parseInterfaceDef(l *common.Lexer) *types.InterfaceTypeDefinition { + i := &types.InterfaceTypeDefinition{Loc: l.Location(), Name: l.ConsumeIdent()} + + if l.Peek() == scanner.Ident { + l.ConsumeKeyword("implements") + i.Interfaces = append(i.Interfaces, &types.InterfaceTypeDefinition{Name: l.ConsumeIdent()}) + + for l.Peek() == '&' { + l.ConsumeToken('&') + i.Interfaces = append(i.Interfaces, &types.InterfaceTypeDefinition{Name: l.ConsumeIdent()}) + } + } + + i.Directives = common.ParseDirectives(l) + + l.ConsumeToken('{') + i.Fields = parseFieldsDef(l) + l.ConsumeToken('}') + + return i +} + +func parseUnionDef(l *common.Lexer) *types.Union { + union := &types.Union{Loc: l.Location(), Name: l.ConsumeIdent()} + + union.Directives = common.ParseDirectives(l) + l.ConsumeToken('=') + union.TypeNames = []string{l.ConsumeIdent()} + for l.Peek() == '|' { + l.ConsumeToken('|') + union.TypeNames = append(union.TypeNames, l.ConsumeIdent()) + } + + return union +} + +func parseInputDef(l *common.Lexer) *types.InputObject { + i := &types.InputObject{} + i.Loc = l.Location() + i.Name = l.ConsumeIdent() + i.Directives = common.ParseDirectives(l) + l.ConsumeToken('{') + for l.Peek() != '}' { + i.Values = append(i.Values, common.ParseInputValue(l)) + } + l.ConsumeToken('}') + return i +} + +func parseEnumDef(l *common.Lexer) *types.EnumTypeDefinition { + enum := &types.EnumTypeDefinition{Loc: l.Location(), Name: l.ConsumeIdent()} + + enum.Directives = common.ParseDirectives(l) + l.ConsumeToken('{') + for l.Peek() != '}' { + v := &types.EnumValueDefinition{ + Desc: l.DescComment(), + Loc: l.Location(), + EnumValue: l.ConsumeIdent(), + Directives: common.ParseDirectives(l), + } + + enum.EnumValuesDefinition = append(enum.EnumValuesDefinition, v) + } + l.ConsumeToken('}') + return enum +} +func parseDirectiveDef(l *common.Lexer) *types.DirectiveDefinition { + l.ConsumeToken('@') + loc := l.Location() + d := &types.DirectiveDefinition{Name: l.ConsumeIdent(), Loc: loc} + + if l.Peek() == '(' { + l.ConsumeToken('(') + for l.Peek() != ')' { + v := common.ParseInputValue(l) + d.Arguments = append(d.Arguments, v) + } + l.ConsumeToken(')') + } + + l.ConsumeKeyword("on") + + for { + loc := l.ConsumeIdent() + d.Locations = append(d.Locations, loc) + if l.Peek() != '|' { + break + } + l.ConsumeToken('|') + } + return d +} + +func parseExtension(s *types.Schema, l *common.Lexer) { + loc := l.Location() + switch x := l.ConsumeIdent(); x { + case "schema": + l.ConsumeToken('{') + for l.Peek() != '}' { + name := l.ConsumeIdent() + l.ConsumeToken(':') + typ := l.ConsumeIdent() + s.EntryPointNames[name] = typ + } + l.ConsumeToken('}') + + case "type": + obj := parseObjectDef(l) + s.Extensions = append(s.Extensions, 
&types.Extension{Type: obj, Loc: loc}) + + case "interface": + iface := parseInterfaceDef(l) + s.Extensions = append(s.Extensions, &types.Extension{Type: iface, Loc: loc}) + + case "union": + union := parseUnionDef(l) + s.Extensions = append(s.Extensions, &types.Extension{Type: union, Loc: loc}) + + case "enum": + enum := parseEnumDef(l) + s.Extensions = append(s.Extensions, &types.Extension{Type: enum, Loc: loc}) + + case "input": + input := parseInputDef(l) + s.Extensions = append(s.Extensions, &types.Extension{Type: input, Loc: loc}) + + default: + // TODO: Add ScalarTypeDefinition when adding directives + l.SyntaxError(fmt.Sprintf(`unexpected %q, expecting "schema", "type", "enum", "interface", "union" or "input"`, x)) + } +} + +func parseFieldsDef(l *common.Lexer) types.FieldsDefinition { + var fields types.FieldsDefinition + for l.Peek() != '}' { + f := &types.FieldDefinition{} + f.Desc = l.DescComment() + f.Loc = l.Location() + f.Name = l.ConsumeIdent() + if l.Peek() == '(' { + l.ConsumeToken('(') + for l.Peek() != ')' { + f.Arguments = append(f.Arguments, common.ParseInputValue(l)) + } + l.ConsumeToken(')') + } + l.ConsumeToken(':') + f.Type = common.ParseType(l) + f.Directives = common.ParseDirectives(l) + fields = append(fields, f) + } + return fields +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/validation/suggestion.go b/vendor/github.com/graph-gophers/graphql-go/internal/validation/suggestion.go new file mode 100644 index 000000000..9702b5f52 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/validation/suggestion.go @@ -0,0 +1,71 @@ +package validation + +import ( + "fmt" + "sort" + "strconv" + "strings" +) + +func makeSuggestion(prefix string, options []string, input string) string { + var selected []string + distances := make(map[string]int) + for _, opt := range options { + distance := levenshteinDistance(input, opt) + threshold := max(len(input)/2, max(len(opt)/2, 1)) + if distance < threshold { + selected = append(selected, opt) + distances[opt] = distance + } + } + + if len(selected) == 0 { + return "" + } + sort.Slice(selected, func(i, j int) bool { + return distances[selected[i]] < distances[selected[j]] + }) + + parts := make([]string, len(selected)) + for i, opt := range selected { + parts[i] = strconv.Quote(opt) + } + if len(parts) > 1 { + parts[len(parts)-1] = "or " + parts[len(parts)-1] + } + return fmt.Sprintf(" %s %s?", prefix, strings.Join(parts, ", ")) +} + +func levenshteinDistance(s1, s2 string) int { + column := make([]int, len(s1)+1) + for y := range s1 { + column[y+1] = y + 1 + } + for x, rx := range s2 { + column[0] = x + 1 + lastdiag := x + for y, ry := range s1 { + olddiag := column[y+1] + if rx != ry { + lastdiag++ + } + column[y+1] = min(column[y+1]+1, min(column[y]+1, lastdiag)) + lastdiag = olddiag + } + } + return column[len(s1)] +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} diff --git a/vendor/github.com/graph-gophers/graphql-go/internal/validation/validation.go b/vendor/github.com/graph-gophers/graphql-go/internal/validation/validation.go new file mode 100644 index 000000000..e36726388 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/internal/validation/validation.go @@ -0,0 +1,980 @@ +package validation + +import ( + "fmt" + "math" + "reflect" + "strconv" + "strings" + "text/scanner" + + "github.com/graph-gophers/graphql-go/errors" + "github.com/graph-gophers/graphql-go/internal/common" + 
"github.com/graph-gophers/graphql-go/internal/query" + "github.com/graph-gophers/graphql-go/types" +) + +type varSet map[*types.InputValueDefinition]struct{} + +type selectionPair struct{ a, b types.Selection } + +type nameSet map[string]errors.Location + +type fieldInfo struct { + sf *types.FieldDefinition + parent types.NamedType +} + +type context struct { + schema *types.Schema + doc *types.ExecutableDefinition + errs []*errors.QueryError + opErrs map[*types.OperationDefinition][]*errors.QueryError + usedVars map[*types.OperationDefinition]varSet + fieldMap map[*types.Field]fieldInfo + overlapValidated map[selectionPair]struct{} + maxDepth int +} + +func (c *context) addErr(loc errors.Location, rule string, format string, a ...interface{}) { + c.addErrMultiLoc([]errors.Location{loc}, rule, format, a...) +} + +func (c *context) addErrMultiLoc(locs []errors.Location, rule string, format string, a ...interface{}) { + c.errs = append(c.errs, &errors.QueryError{ + Message: fmt.Sprintf(format, a...), + Locations: locs, + Rule: rule, + }) +} + +type opContext struct { + *context + ops []*types.OperationDefinition +} + +func newContext(s *types.Schema, doc *types.ExecutableDefinition, maxDepth int) *context { + return &context{ + schema: s, + doc: doc, + opErrs: make(map[*types.OperationDefinition][]*errors.QueryError), + usedVars: make(map[*types.OperationDefinition]varSet), + fieldMap: make(map[*types.Field]fieldInfo), + overlapValidated: make(map[selectionPair]struct{}), + maxDepth: maxDepth, + } +} + +func Validate(s *types.Schema, doc *types.ExecutableDefinition, variables map[string]interface{}, maxDepth int) []*errors.QueryError { + c := newContext(s, doc, maxDepth) + + opNames := make(nameSet) + fragUsedBy := make(map[*types.FragmentDefinition][]*types.OperationDefinition) + for _, op := range doc.Operations { + c.usedVars[op] = make(varSet) + opc := &opContext{c, []*types.OperationDefinition{op}} + + // Check if max depth is exceeded, if it's set. If max depth is exceeded, + // don't continue to validate the document and exit early. + if validateMaxDepth(opc, op.Selections, nil, 1) { + return c.errs + } + + if op.Name.Name == "" && len(doc.Operations) != 1 { + c.addErr(op.Loc, "LoneAnonymousOperation", "This anonymous operation must be the only defined operation.") + } + if op.Name.Name != "" { + validateName(c, opNames, op.Name, "UniqueOperationNames", "operation") + } + + validateDirectives(opc, string(op.Type), op.Directives) + + varNames := make(nameSet) + for _, v := range op.Vars { + validateName(c, varNames, v.Name, "UniqueVariableNames", "variable") + + t := resolveType(c, v.Type) + if !canBeInput(t) { + c.addErr(v.TypeLoc, "VariablesAreInputTypes", "Variable %q cannot be non-input type %q.", "$"+v.Name.Name, t) + } + validateValue(opc, v, variables[v.Name.Name], t) + + if v.Default != nil { + validateLiteral(opc, v.Default) + + if t != nil { + if nn, ok := t.(*types.NonNull); ok { + c.addErr(v.Default.Location(), "DefaultValuesOfCorrectType", "Variable %q of type %q is required and will not use the default value. 
Perhaps you meant to use type %q.", "$"+v.Name.Name, t, nn.OfType) + } + + if ok, reason := validateValueType(opc, v.Default, t); !ok { + c.addErr(v.Default.Location(), "DefaultValuesOfCorrectType", "Variable %q of type %q has invalid default value %s.\n%s", "$"+v.Name.Name, t, v.Default, reason) + } + } + } + } + + var entryPoint types.NamedType + switch op.Type { + case query.Query: + entryPoint = s.EntryPoints["query"] + case query.Mutation: + entryPoint = s.EntryPoints["mutation"] + case query.Subscription: + entryPoint = s.EntryPoints["subscription"] + default: + panic("unreachable") + } + + validateSelectionSet(opc, op.Selections, entryPoint) + + fragUsed := make(map[*types.FragmentDefinition]struct{}) + markUsedFragments(c, op.Selections, fragUsed) + for frag := range fragUsed { + fragUsedBy[frag] = append(fragUsedBy[frag], op) + } + } + + fragNames := make(nameSet) + fragVisited := make(map[*types.FragmentDefinition]struct{}) + for _, frag := range doc.Fragments { + opc := &opContext{c, fragUsedBy[frag]} + + validateName(c, fragNames, frag.Name, "UniqueFragmentNames", "fragment") + validateDirectives(opc, "FRAGMENT_DEFINITION", frag.Directives) + + t := unwrapType(resolveType(c, &frag.On)) + // continue even if t is nil + if t != nil && !canBeFragment(t) { + c.addErr(frag.On.Loc, "FragmentsOnCompositeTypes", "Fragment %q cannot condition on non composite type %q.", frag.Name.Name, t) + continue + } + + validateSelectionSet(opc, frag.Selections, t) + + if _, ok := fragVisited[frag]; !ok { + detectFragmentCycle(c, frag.Selections, fragVisited, nil, map[string]int{frag.Name.Name: 0}) + } + } + + for _, frag := range doc.Fragments { + if len(fragUsedBy[frag]) == 0 { + c.addErr(frag.Loc, "NoUnusedFragments", "Fragment %q is never used.", frag.Name.Name) + } + } + + for _, op := range doc.Operations { + c.errs = append(c.errs, c.opErrs[op]...) 
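+		// flag operation variables that were declared but never used (NoUnusedVariables)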
+ + opUsedVars := c.usedVars[op] + for _, v := range op.Vars { + if _, ok := opUsedVars[v]; !ok { + opSuffix := "" + if op.Name.Name != "" { + opSuffix = fmt.Sprintf(" in operation %q", op.Name.Name) + } + c.addErr(v.Loc, "NoUnusedVariables", "Variable %q is never used%s.", "$"+v.Name.Name, opSuffix) + } + } + } + + return c.errs +} + +func validateValue(c *opContext, v *types.InputValueDefinition, val interface{}, t types.Type) { + switch t := t.(type) { + case *types.NonNull: + if val == nil { + c.addErr(v.Loc, "VariablesOfCorrectType", "Variable \"%s\" has invalid value null.\nExpected type \"%s\", found null.", v.Name.Name, t) + return + } + validateValue(c, v, val, t.OfType) + case *types.List: + if val == nil { + return + } + vv, ok := val.([]interface{}) + if !ok { + // Input coercion rules allow single items without wrapping array + validateValue(c, v, val, t.OfType) + return + } + for _, elem := range vv { + validateValue(c, v, elem, t.OfType) + } + case *types.EnumTypeDefinition: + if val == nil { + return + } + e, ok := val.(string) + if !ok { + c.addErr(v.Loc, "VariablesOfCorrectType", "Variable \"%s\" has invalid type %T.\nExpected type \"%s\", found %v.", v.Name.Name, val, t, val) + return + } + for _, option := range t.EnumValuesDefinition { + if option.EnumValue == e { + return + } + } + c.addErr(v.Loc, "VariablesOfCorrectType", "Variable \"%s\" has invalid value %s.\nExpected type \"%s\", found %s.", v.Name.Name, e, t, e) + case *types.InputObject: + if val == nil { + return + } + in, ok := val.(map[string]interface{}) + if !ok { + c.addErr(v.Loc, "VariablesOfCorrectType", "Variable \"%s\" has invalid type %T.\nExpected type \"%s\", found %s.", v.Name.Name, val, t, val) + return + } + for _, f := range t.Values { + fieldVal := in[f.Name.Name] + validateValue(c, f, fieldVal, f.Type) + } + } +} + +// validates the query doesn't go deeper than maxDepth (if set). Returns whether +// or not query validated max depth to avoid excessive recursion. +// +// The visited map is necessary to ensure that max depth validation does not get stuck in cyclical +// fragment spreads. +func validateMaxDepth(c *opContext, sels []types.Selection, visited map[*types.FragmentDefinition]struct{}, depth int) bool { + // maxDepth checking is turned off when maxDepth is 0 + if c.maxDepth == 0 { + return false + } + + exceededMaxDepth := false + if visited == nil { + visited = map[*types.FragmentDefinition]struct{}{} + } + + for _, sel := range sels { + switch sel := sel.(type) { + case *types.Field: + if depth > c.maxDepth { + exceededMaxDepth = true + c.addErr(sel.Alias.Loc, "MaxDepthExceeded", "Field %q has depth %d that exceeds max depth %d", sel.Name.Name, depth, c.maxDepth) + continue + } + exceededMaxDepth = exceededMaxDepth || validateMaxDepth(c, sel.SelectionSet, visited, depth+1) + + case *types.InlineFragment: + // Depth is not checked because inline fragments resolve to other fields which are checked. + // Depth is not incremented because inline fragments have the same depth as neighboring fields + exceededMaxDepth = exceededMaxDepth || validateMaxDepth(c, sel.Selections, visited, depth) + case *types.FragmentSpread: + // Depth is not checked because fragments resolve to other fields which are checked. + frag := c.doc.Fragments.Get(sel.Name.Name) + if frag == nil { + // In case of unknown fragment (invalid request), ignore max depth evaluation + c.addErr(sel.Loc, "MaxDepthEvaluationError", "Unknown fragment %q. 
Unable to evaluate depth.", sel.Name.Name) + continue + } + + if _, ok := visited[frag]; ok { + // we've already seen this fragment, don't check depth again. + continue + } + visited[frag] = struct{}{} + + // Depth is not incremented because fragments have the same depth as surrounding fields + exceededMaxDepth = exceededMaxDepth || validateMaxDepth(c, frag.Selections, visited, depth) + } + } + + return exceededMaxDepth +} + +func validateSelectionSet(c *opContext, sels []types.Selection, t types.NamedType) { + for _, sel := range sels { + validateSelection(c, sel, t) + } + + for i, a := range sels { + for _, b := range sels[i+1:] { + c.validateOverlap(a, b, nil, nil) + } + } +} + +func validateSelection(c *opContext, sel types.Selection, t types.NamedType) { + switch sel := sel.(type) { + case *types.Field: + validateDirectives(c, "FIELD", sel.Directives) + + fieldName := sel.Name.Name + var f *types.FieldDefinition + switch fieldName { + case "__typename": + f = &types.FieldDefinition{ + Name: "__typename", + Type: c.schema.Types["String"], + } + case "__schema": + f = &types.FieldDefinition{ + Name: "__schema", + Type: c.schema.Types["__Schema"], + } + case "__type": + f = &types.FieldDefinition{ + Name: "__type", + Arguments: types.ArgumentsDefinition{ + &types.InputValueDefinition{ + Name: types.Ident{Name: "name"}, + Type: &types.NonNull{OfType: c.schema.Types["String"]}, + }, + }, + Type: c.schema.Types["__Type"], + } + default: + f = fields(t).Get(fieldName) + if f == nil && t != nil { + suggestion := makeSuggestion("Did you mean", fields(t).Names(), fieldName) + c.addErr(sel.Alias.Loc, "FieldsOnCorrectType", "Cannot query field %q on type %q.%s", fieldName, t, suggestion) + } + } + c.fieldMap[sel] = fieldInfo{sf: f, parent: t} + + validateArgumentLiterals(c, sel.Arguments) + if f != nil { + validateArgumentTypes(c, sel.Arguments, f.Arguments, sel.Alias.Loc, + func() string { return fmt.Sprintf("field %q of type %q", fieldName, t) }, + func() string { return fmt.Sprintf("Field %q", fieldName) }, + ) + } + + var ft types.Type + if f != nil { + ft = f.Type + sf := hasSubfields(ft) + if sf && sel.SelectionSet == nil { + c.addErr(sel.Alias.Loc, "ScalarLeafs", "Field %q of type %q must have a selection of subfields. Did you mean \"%s { ... 
}\"?", fieldName, ft, fieldName) + } + if !sf && sel.SelectionSet != nil { + c.addErr(sel.SelectionSetLoc, "ScalarLeafs", "Field %q must not have a selection since type %q has no subfields.", fieldName, ft) + } + } + if sel.SelectionSet != nil { + validateSelectionSet(c, sel.SelectionSet, unwrapType(ft)) + } + + case *types.InlineFragment: + validateDirectives(c, "INLINE_FRAGMENT", sel.Directives) + if sel.On.Name != "" { + fragTyp := unwrapType(resolveType(c.context, &sel.On)) + if fragTyp != nil && !compatible(t, fragTyp) { + c.addErr(sel.Loc, "PossibleFragmentSpreads", "Fragment cannot be spread here as objects of type %q can never be of type %q.", t, fragTyp) + } + t = fragTyp + // continue even if t is nil + } + if t != nil && !canBeFragment(t) { + c.addErr(sel.On.Loc, "FragmentsOnCompositeTypes", "Fragment cannot condition on non composite type %q.", t) + return + } + validateSelectionSet(c, sel.Selections, unwrapType(t)) + + case *types.FragmentSpread: + validateDirectives(c, "FRAGMENT_SPREAD", sel.Directives) + frag := c.doc.Fragments.Get(sel.Name.Name) + if frag == nil { + c.addErr(sel.Name.Loc, "KnownFragmentNames", "Unknown fragment %q.", sel.Name.Name) + return + } + fragTyp := c.schema.Types[frag.On.Name] + if !compatible(t, fragTyp) { + c.addErr(sel.Loc, "PossibleFragmentSpreads", "Fragment %q cannot be spread here as objects of type %q can never be of type %q.", frag.Name.Name, t, fragTyp) + } + + default: + panic("unreachable") + } +} + +func compatible(a, b types.Type) bool { + for _, pta := range possibleTypes(a) { + for _, ptb := range possibleTypes(b) { + if pta == ptb { + return true + } + } + } + return false +} + +func possibleTypes(t types.Type) []*types.ObjectTypeDefinition { + switch t := t.(type) { + case *types.ObjectTypeDefinition: + return []*types.ObjectTypeDefinition{t} + case *types.InterfaceTypeDefinition: + return t.PossibleTypes + case *types.Union: + return t.UnionMemberTypes + default: + return nil + } +} + +func markUsedFragments(c *context, sels []types.Selection, fragUsed map[*types.FragmentDefinition]struct{}) { + for _, sel := range sels { + switch sel := sel.(type) { + case *types.Field: + if sel.SelectionSet != nil { + markUsedFragments(c, sel.SelectionSet, fragUsed) + } + + case *types.InlineFragment: + markUsedFragments(c, sel.Selections, fragUsed) + + case *types.FragmentSpread: + frag := c.doc.Fragments.Get(sel.Name.Name) + if frag == nil { + return + } + + if _, ok := fragUsed[frag]; ok { + continue + } + + fragUsed[frag] = struct{}{} + markUsedFragments(c, frag.Selections, fragUsed) + + default: + panic("unreachable") + } + } +} + +func detectFragmentCycle(c *context, sels []types.Selection, fragVisited map[*types.FragmentDefinition]struct{}, spreadPath []*types.FragmentSpread, spreadPathIndex map[string]int) { + for _, sel := range sels { + detectFragmentCycleSel(c, sel, fragVisited, spreadPath, spreadPathIndex) + } +} + +func detectFragmentCycleSel(c *context, sel types.Selection, fragVisited map[*types.FragmentDefinition]struct{}, spreadPath []*types.FragmentSpread, spreadPathIndex map[string]int) { + switch sel := sel.(type) { + case *types.Field: + if sel.SelectionSet != nil { + detectFragmentCycle(c, sel.SelectionSet, fragVisited, spreadPath, spreadPathIndex) + } + + case *types.InlineFragment: + detectFragmentCycle(c, sel.Selections, fragVisited, spreadPath, spreadPathIndex) + + case *types.FragmentSpread: + frag := c.doc.Fragments.Get(sel.Name.Name) + if frag == nil { + return + } + + spreadPath = append(spreadPath, sel) + if i, ok 
:= spreadPathIndex[frag.Name.Name]; ok { + cyclePath := spreadPath[i:] + via := "" + if len(cyclePath) > 1 { + names := make([]string, len(cyclePath)-1) + for i, frag := range cyclePath[:len(cyclePath)-1] { + names[i] = frag.Name.Name + } + via = " via " + strings.Join(names, ", ") + } + + locs := make([]errors.Location, len(cyclePath)) + for i, frag := range cyclePath { + locs[i] = frag.Loc + } + c.addErrMultiLoc(locs, "NoFragmentCycles", "Cannot spread fragment %q within itself%s.", frag.Name.Name, via) + return + } + + if _, ok := fragVisited[frag]; ok { + return + } + fragVisited[frag] = struct{}{} + + spreadPathIndex[frag.Name.Name] = len(spreadPath) + detectFragmentCycle(c, frag.Selections, fragVisited, spreadPath, spreadPathIndex) + delete(spreadPathIndex, frag.Name.Name) + + default: + panic("unreachable") + } +} + +func (c *context) validateOverlap(a, b types.Selection, reasons *[]string, locs *[]errors.Location) { + if a == b { + return + } + + if _, ok := c.overlapValidated[selectionPair{a, b}]; ok { + return + } + c.overlapValidated[selectionPair{a, b}] = struct{}{} + c.overlapValidated[selectionPair{b, a}] = struct{}{} + + switch a := a.(type) { + case *types.Field: + switch b := b.(type) { + case *types.Field: + if b.Alias.Loc.Before(a.Alias.Loc) { + a, b = b, a + } + if reasons2, locs2 := c.validateFieldOverlap(a, b); len(reasons2) != 0 { + locs2 = append(locs2, a.Alias.Loc, b.Alias.Loc) + if reasons == nil { + c.addErrMultiLoc(locs2, "OverlappingFieldsCanBeMerged", "Fields %q conflict because %s. Use different aliases on the fields to fetch both if this was intentional.", a.Alias.Name, strings.Join(reasons2, " and ")) + return + } + for _, r := range reasons2 { + *reasons = append(*reasons, fmt.Sprintf("subfields %q conflict because %s", a.Alias.Name, r)) + } + *locs = append(*locs, locs2...) 
+ } + + case *types.InlineFragment: + for _, sel := range b.Selections { + c.validateOverlap(a, sel, reasons, locs) + } + + case *types.FragmentSpread: + if frag := c.doc.Fragments.Get(b.Name.Name); frag != nil { + for _, sel := range frag.Selections { + c.validateOverlap(a, sel, reasons, locs) + } + } + + default: + panic("unreachable") + } + + case *types.InlineFragment: + for _, sel := range a.Selections { + c.validateOverlap(sel, b, reasons, locs) + } + + case *types.FragmentSpread: + if frag := c.doc.Fragments.Get(a.Name.Name); frag != nil { + for _, sel := range frag.Selections { + c.validateOverlap(sel, b, reasons, locs) + } + } + + default: + panic("unreachable") + } +} + +func (c *context) validateFieldOverlap(a, b *types.Field) ([]string, []errors.Location) { + if a.Alias.Name != b.Alias.Name { + return nil, nil + } + + if asf := c.fieldMap[a].sf; asf != nil { + if bsf := c.fieldMap[b].sf; bsf != nil { + if !typesCompatible(asf.Type, bsf.Type) { + return []string{fmt.Sprintf("they return conflicting types %s and %s", asf.Type, bsf.Type)}, nil + } + } + } + + at := c.fieldMap[a].parent + bt := c.fieldMap[b].parent + if at == nil || bt == nil || at == bt { + if a.Name.Name != b.Name.Name { + return []string{fmt.Sprintf("%s and %s are different fields", a.Name.Name, b.Name.Name)}, nil + } + + if argumentsConflict(a.Arguments, b.Arguments) { + return []string{"they have differing arguments"}, nil + } + } + + var reasons []string + var locs []errors.Location + for _, a2 := range a.SelectionSet { + for _, b2 := range b.SelectionSet { + c.validateOverlap(a2, b2, &reasons, &locs) + } + } + return reasons, locs +} + +func argumentsConflict(a, b types.ArgumentList) bool { + if len(a) != len(b) { + return true + } + for _, argA := range a { + valB, ok := b.Get(argA.Name.Name) + if !ok || !reflect.DeepEqual(argA.Value.Deserialize(nil), valB.Deserialize(nil)) { + return true + } + } + return false +} + +func fields(t types.Type) types.FieldsDefinition { + switch t := t.(type) { + case *types.ObjectTypeDefinition: + return t.Fields + case *types.InterfaceTypeDefinition: + return t.Fields + default: + return nil + } +} + +func unwrapType(t types.Type) types.NamedType { + if t == nil { + return nil + } + for { + switch t2 := t.(type) { + case types.NamedType: + return t2 + case *types.List: + t = t2.OfType + case *types.NonNull: + t = t2.OfType + default: + panic("unreachable") + } + } +} + +func resolveType(c *context, t types.Type) types.Type { + t2, err := common.ResolveType(t, c.schema.Resolve) + if err != nil { + c.errs = append(c.errs, err) + } + return t2 +} + +func validateDirectives(c *opContext, loc string, directives types.DirectiveList) { + directiveNames := make(nameSet) + for _, d := range directives { + dirName := d.Name.Name + validateNameCustomMsg(c.context, directiveNames, d.Name, "UniqueDirectivesPerLocation", func() string { + return fmt.Sprintf("The directive %q can only be used once at this location.", dirName) + }) + + validateArgumentLiterals(c, d.Arguments) + + dd, ok := c.schema.Directives[dirName] + if !ok { + c.addErr(d.Name.Loc, "KnownDirectives", "Unknown directive %q.", dirName) + continue + } + + locOK := false + for _, allowedLoc := range dd.Locations { + if loc == allowedLoc { + locOK = true + break + } + } + if !locOK { + c.addErr(d.Name.Loc, "KnownDirectives", "Directive %q may not be used on %s.", dirName, loc) + } + + validateArgumentTypes(c, d.Arguments, dd.Arguments, d.Name.Loc, + func() string { return fmt.Sprintf("directive %q", "@"+dirName) }, + func() 
string { return fmt.Sprintf("Directive %q", "@"+dirName) }, + ) + } +} + +func validateName(c *context, set nameSet, name types.Ident, rule string, kind string) { + validateNameCustomMsg(c, set, name, rule, func() string { + return fmt.Sprintf("There can be only one %s named %q.", kind, name.Name) + }) +} + +func validateNameCustomMsg(c *context, set nameSet, name types.Ident, rule string, msg func() string) { + if loc, ok := set[name.Name]; ok { + c.addErrMultiLoc([]errors.Location{loc, name.Loc}, rule, msg()) + return + } + set[name.Name] = name.Loc +} + +func validateArgumentTypes(c *opContext, args types.ArgumentList, argDecls types.ArgumentsDefinition, loc errors.Location, owner1, owner2 func() string) { + for _, selArg := range args { + arg := argDecls.Get(selArg.Name.Name) + if arg == nil { + c.addErr(selArg.Name.Loc, "KnownArgumentNames", "Unknown argument %q on %s.", selArg.Name.Name, owner1()) + continue + } + value := selArg.Value + if ok, reason := validateValueType(c, value, arg.Type); !ok { + c.addErr(value.Location(), "ArgumentsOfCorrectType", "Argument %q has invalid value %s.\n%s", arg.Name.Name, value, reason) + } + } + for _, decl := range argDecls { + if _, ok := decl.Type.(*types.NonNull); ok { + if _, ok := args.Get(decl.Name.Name); !ok { + c.addErr(loc, "ProvidedNonNullArguments", "%s argument %q of type %q is required but not provided.", owner2(), decl.Name.Name, decl.Type) + } + } + } +} + +func validateArgumentLiterals(c *opContext, args types.ArgumentList) { + argNames := make(nameSet) + for _, arg := range args { + validateName(c.context, argNames, arg.Name, "UniqueArgumentNames", "argument") + validateLiteral(c, arg.Value) + } +} + +func validateLiteral(c *opContext, l types.Value) { + switch l := l.(type) { + case *types.ObjectValue: + fieldNames := make(nameSet) + for _, f := range l.Fields { + validateName(c.context, fieldNames, f.Name, "UniqueInputFieldNames", "input field") + validateLiteral(c, f.Value) + } + case *types.ListValue: + for _, entry := range l.Values { + validateLiteral(c, entry) + } + case *types.Variable: + for _, op := range c.ops { + v := op.Vars.Get(l.Name) + if v == nil { + byOp := "" + if op.Name.Name != "" { + byOp = fmt.Sprintf(" by operation %q", op.Name.Name) + } + c.opErrs[op] = append(c.opErrs[op], &errors.QueryError{ + Message: fmt.Sprintf("Variable %q is not defined%s.", "$"+l.Name, byOp), + Locations: []errors.Location{l.Loc, op.Loc}, + Rule: "NoUndefinedVariables", + }) + continue + } + validateValueType(c, l, resolveType(c.context, v.Type)) + c.usedVars[op][v] = struct{}{} + } + } +} + +func validateValueType(c *opContext, v types.Value, t types.Type) (bool, string) { + if v, ok := v.(*types.Variable); ok { + for _, op := range c.ops { + if v2 := op.Vars.Get(v.Name); v2 != nil { + t2, err := common.ResolveType(v2.Type, c.schema.Resolve) + if _, ok := t2.(*types.NonNull); !ok && v2.Default != nil { + t2 = &types.NonNull{OfType: t2} + } + if err == nil && !typeCanBeUsedAs(t2, t) { + c.addErrMultiLoc([]errors.Location{v2.Loc, v.Loc}, "VariablesInAllowedPosition", "Variable %q of type %q used in position expecting type %q.", "$"+v.Name, t2, t) + } + } + } + return true, "" + } + + if nn, ok := t.(*types.NonNull); ok { + if isNull(v) { + return false, fmt.Sprintf("Expected %q, found null.", t) + } + t = nn.OfType + } + if isNull(v) { + return true, "" + } + + switch t := t.(type) { + case *types.ScalarTypeDefinition, *types.EnumTypeDefinition: + if lit, ok := v.(*types.PrimitiveValue); ok { + if validateBasicLit(lit, t) { + 
return true, "" + } + return false, fmt.Sprintf("Expected type %q, found %s.", t, v) + } + return true, "" + + case *types.List: + list, ok := v.(*types.ListValue) + if !ok { + return validateValueType(c, v, t.OfType) // single value instead of list + } + for i, entry := range list.Values { + if ok, reason := validateValueType(c, entry, t.OfType); !ok { + return false, fmt.Sprintf("In element #%d: %s", i, reason) + } + } + return true, "" + + case *types.InputObject: + v, ok := v.(*types.ObjectValue) + if !ok { + return false, fmt.Sprintf("Expected %q, found not an object.", t) + } + for _, f := range v.Fields { + name := f.Name.Name + iv := t.Values.Get(name) + if iv == nil { + return false, fmt.Sprintf("In field %q: Unknown field.", name) + } + if ok, reason := validateValueType(c, f.Value, iv.Type); !ok { + return false, fmt.Sprintf("In field %q: %s", name, reason) + } + } + for _, iv := range t.Values { + found := false + for _, f := range v.Fields { + if f.Name.Name == iv.Name.Name { + found = true + break + } + } + if !found { + if _, ok := iv.Type.(*types.NonNull); ok && iv.Default == nil { + return false, fmt.Sprintf("In field %q: Expected %q, found null.", iv.Name.Name, iv.Type) + } + } + } + return true, "" + } + + return false, fmt.Sprintf("Expected type %q, found %s.", t, v) +} + +func validateBasicLit(v *types.PrimitiveValue, t types.Type) bool { + switch t := t.(type) { + case *types.ScalarTypeDefinition: + switch t.Name { + case "Int": + if v.Type != scanner.Int { + return false + } + f, err := strconv.ParseFloat(v.Text, 64) + if err != nil { + panic(err) + } + return f >= math.MinInt32 && f <= math.MaxInt32 + case "Float": + return v.Type == scanner.Int || v.Type == scanner.Float + case "String": + return v.Type == scanner.String + case "Boolean": + return v.Type == scanner.Ident && (v.Text == "true" || v.Text == "false") + case "ID": + return v.Type == scanner.Int || v.Type == scanner.String + default: + //TODO: Type-check against expected type by Unmarshalling + return true + } + + case *types.EnumTypeDefinition: + if v.Type != scanner.Ident { + return false + } + for _, option := range t.EnumValuesDefinition { + if option.EnumValue == v.Text { + return true + } + } + return false + } + + return false +} + +func canBeFragment(t types.Type) bool { + switch t.(type) { + case *types.ObjectTypeDefinition, *types.InterfaceTypeDefinition, *types.Union: + return true + default: + return false + } +} + +func canBeInput(t types.Type) bool { + switch t := t.(type) { + case *types.InputObject, *types.ScalarTypeDefinition, *types.EnumTypeDefinition: + return true + case *types.List: + return canBeInput(t.OfType) + case *types.NonNull: + return canBeInput(t.OfType) + default: + return false + } +} + +func hasSubfields(t types.Type) bool { + switch t := t.(type) { + case *types.ObjectTypeDefinition, *types.InterfaceTypeDefinition, *types.Union: + return true + case *types.List: + return hasSubfields(t.OfType) + case *types.NonNull: + return hasSubfields(t.OfType) + default: + return false + } +} + +func isLeaf(t types.Type) bool { + switch t.(type) { + case *types.ScalarTypeDefinition, *types.EnumTypeDefinition: + return true + default: + return false + } +} + +func isNull(lit interface{}) bool { + _, ok := lit.(*types.NullValue) + return ok +} + +func typesCompatible(a, b types.Type) bool { + al, aIsList := a.(*types.List) + bl, bIsList := b.(*types.List) + if aIsList || bIsList { + return aIsList && bIsList && typesCompatible(al.OfType, bl.OfType) + } + + ann, aIsNN := 
a.(*types.NonNull) + bnn, bIsNN := b.(*types.NonNull) + if aIsNN || bIsNN { + return aIsNN && bIsNN && typesCompatible(ann.OfType, bnn.OfType) + } + + if isLeaf(a) || isLeaf(b) { + return a == b + } + + return true +} + +func typeCanBeUsedAs(t, as types.Type) bool { + nnT, okT := t.(*types.NonNull) + if okT { + t = nnT.OfType + } + + nnAs, okAs := as.(*types.NonNull) + if okAs { + as = nnAs.OfType + if !okT { + return false // nullable can not be used as non-null + } + } + + if t == as { + return true + } + + if lT, ok := t.(*types.List); ok { + if lAs, ok := as.(*types.List); ok { + return typeCanBeUsedAs(lT.OfType, lAs.OfType) + } + } + return false +} diff --git a/vendor/github.com/graph-gophers/graphql-go/introspection.go b/vendor/github.com/graph-gophers/graphql-go/introspection.go new file mode 100644 index 000000000..6877bcaf3 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/introspection.go @@ -0,0 +1,118 @@ +package graphql + +import ( + "context" + "encoding/json" + + "github.com/graph-gophers/graphql-go/internal/exec/resolvable" + "github.com/graph-gophers/graphql-go/introspection" +) + +// Inspect allows inspection of the given schema. +func (s *Schema) Inspect() *introspection.Schema { + return introspection.WrapSchema(s.schema) +} + +// ToJSON encodes the schema in a JSON format used by tools like Relay. +func (s *Schema) ToJSON() ([]byte, error) { + result := s.exec(context.Background(), introspectionQuery, "", nil, &resolvable.Schema{ + Meta: s.res.Meta, + Query: &resolvable.Object{}, + Schema: *s.schema, + }) + if len(result.Errors) != 0 { + panic(result.Errors[0]) + } + return json.MarshalIndent(result.Data, "", "\t") +} + +var introspectionQuery = ` + query { + __schema { + queryType { name } + mutationType { name } + subscriptionType { name } + types { + ...FullType + } + directives { + name + description + locations + args { + ...InputValue + } + } + } + } + fragment FullType on __Type { + kind + name + description + fields(includeDeprecated: true) { + name + description + args { + ...InputValue + } + type { + ...TypeRef + } + isDeprecated + deprecationReason + } + inputFields { + ...InputValue + } + interfaces { + ...TypeRef + } + enumValues(includeDeprecated: true) { + name + description + isDeprecated + deprecationReason + } + possibleTypes { + ...TypeRef + } + } + fragment InputValue on __InputValue { + name + description + type { ...TypeRef } + defaultValue + } + fragment TypeRef on __Type { + kind + name + ofType { + kind + name + ofType { + kind + name + ofType { + kind + name + ofType { + kind + name + ofType { + kind + name + ofType { + kind + name + ofType { + kind + name + } + } + } + } + } + } + } + } +` diff --git a/vendor/github.com/graph-gophers/graphql-go/introspection/introspection.go b/vendor/github.com/graph-gophers/graphql-go/introspection/introspection.go new file mode 100644 index 000000000..a0a2fa9bb --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/introspection/introspection.go @@ -0,0 +1,312 @@ +package introspection + +import ( + "sort" + + "github.com/graph-gophers/graphql-go/types" +) + +type Schema struct { + schema *types.Schema +} + +// WrapSchema is only used internally. 
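+// It wraps the parsed *types.Schema so the introspection resolver types below can serve it.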
+func WrapSchema(schema *types.Schema) *Schema { + return &Schema{schema} +} + +func (r *Schema) Types() []*Type { + var names []string + for name := range r.schema.Types { + names = append(names, name) + } + sort.Strings(names) + + l := make([]*Type, len(names)) + for i, name := range names { + l[i] = &Type{r.schema.Types[name]} + } + return l +} + +func (r *Schema) Directives() []*Directive { + var names []string + for name := range r.schema.Directives { + names = append(names, name) + } + sort.Strings(names) + + l := make([]*Directive, len(names)) + for i, name := range names { + l[i] = &Directive{r.schema.Directives[name]} + } + return l +} + +func (r *Schema) QueryType() *Type { + t, ok := r.schema.EntryPoints["query"] + if !ok { + return nil + } + return &Type{t} +} + +func (r *Schema) MutationType() *Type { + t, ok := r.schema.EntryPoints["mutation"] + if !ok { + return nil + } + return &Type{t} +} + +func (r *Schema) SubscriptionType() *Type { + t, ok := r.schema.EntryPoints["subscription"] + if !ok { + return nil + } + return &Type{t} +} + +type Type struct { + typ types.Type +} + +// WrapType is only used internally. +func WrapType(typ types.Type) *Type { + return &Type{typ} +} + +func (r *Type) Kind() string { + return r.typ.Kind() +} + +func (r *Type) Name() *string { + if named, ok := r.typ.(types.NamedType); ok { + name := named.TypeName() + return &name + } + return nil +} + +func (r *Type) Description() *string { + if named, ok := r.typ.(types.NamedType); ok { + desc := named.Description() + if desc == "" { + return nil + } + return &desc + } + return nil +} + +func (r *Type) Fields(args *struct{ IncludeDeprecated bool }) *[]*Field { + var fields types.FieldsDefinition + switch t := r.typ.(type) { + case *types.ObjectTypeDefinition: + fields = t.Fields + case *types.InterfaceTypeDefinition: + fields = t.Fields + default: + return nil + } + + var l []*Field + for _, f := range fields { + if d := f.Directives.Get("deprecated"); d == nil || args.IncludeDeprecated { + l = append(l, &Field{field: f}) + } + } + return &l +} + +func (r *Type) Interfaces() *[]*Type { + t, ok := r.typ.(*types.ObjectTypeDefinition) + if !ok { + return nil + } + + l := make([]*Type, len(t.Interfaces)) + for i, intf := range t.Interfaces { + l[i] = &Type{intf} + } + return &l +} + +func (r *Type) PossibleTypes() *[]*Type { + var possibleTypes []*types.ObjectTypeDefinition + switch t := r.typ.(type) { + case *types.InterfaceTypeDefinition: + possibleTypes = t.PossibleTypes + case *types.Union: + possibleTypes = t.UnionMemberTypes + default: + return nil + } + + l := make([]*Type, len(possibleTypes)) + for i, intf := range possibleTypes { + l[i] = &Type{intf} + } + return &l +} + +func (r *Type) EnumValues(args *struct{ IncludeDeprecated bool }) *[]*EnumValue { + t, ok := r.typ.(*types.EnumTypeDefinition) + if !ok { + return nil + } + + var l []*EnumValue + for _, v := range t.EnumValuesDefinition { + if d := v.Directives.Get("deprecated"); d == nil || args.IncludeDeprecated { + l = append(l, &EnumValue{v}) + } + } + return &l +} + +func (r *Type) InputFields() *[]*InputValue { + t, ok := r.typ.(*types.InputObject) + if !ok { + return nil + } + + l := make([]*InputValue, len(t.Values)) + for i, v := range t.Values { + l[i] = &InputValue{v} + } + return &l +} + +func (r *Type) OfType() *Type { + switch t := r.typ.(type) { + case *types.List: + return &Type{t.OfType} + case *types.NonNull: + return &Type{t.OfType} + default: + return nil + } +} + +type Field struct { + field *types.FieldDefinition +} + 
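+// Name returns the name of the wrapped field definition, as exposed through the __Field introspection type.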
+func (r *Field) Name() string { + return r.field.Name +} + +func (r *Field) Description() *string { + if r.field.Desc == "" { + return nil + } + return &r.field.Desc +} + +func (r *Field) Args() []*InputValue { + l := make([]*InputValue, len(r.field.Arguments)) + for i, v := range r.field.Arguments { + l[i] = &InputValue{v} + } + return l +} + +func (r *Field) Type() *Type { + return &Type{r.field.Type} +} + +func (r *Field) IsDeprecated() bool { + return r.field.Directives.Get("deprecated") != nil +} + +func (r *Field) DeprecationReason() *string { + d := r.field.Directives.Get("deprecated") + if d == nil { + return nil + } + reason := d.Arguments.MustGet("reason").Deserialize(nil).(string) + return &reason +} + +type InputValue struct { + value *types.InputValueDefinition +} + +func (r *InputValue) Name() string { + return r.value.Name.Name +} + +func (r *InputValue) Description() *string { + if r.value.Desc == "" { + return nil + } + return &r.value.Desc +} + +func (r *InputValue) Type() *Type { + return &Type{r.value.Type} +} + +func (r *InputValue) DefaultValue() *string { + if r.value.Default == nil { + return nil + } + s := r.value.Default.String() + return &s +} + +type EnumValue struct { + value *types.EnumValueDefinition +} + +func (r *EnumValue) Name() string { + return r.value.EnumValue +} + +func (r *EnumValue) Description() *string { + if r.value.Desc == "" { + return nil + } + return &r.value.Desc +} + +func (r *EnumValue) IsDeprecated() bool { + return r.value.Directives.Get("deprecated") != nil +} + +func (r *EnumValue) DeprecationReason() *string { + d := r.value.Directives.Get("deprecated") + if d == nil { + return nil + } + reason := d.Arguments.MustGet("reason").Deserialize(nil).(string) + return &reason +} + +type Directive struct { + directive *types.DirectiveDefinition +} + +func (r *Directive) Name() string { + return r.directive.Name +} + +func (r *Directive) Description() *string { + if r.directive.Desc == "" { + return nil + } + return &r.directive.Desc +} + +func (r *Directive) Locations() []string { + return r.directive.Locations +} + +func (r *Directive) Args() []*InputValue { + l := make([]*InputValue, len(r.directive.Arguments)) + for i, v := range r.directive.Arguments { + l[i] = &InputValue{v} + } + return l +} diff --git a/vendor/github.com/graph-gophers/graphql-go/nullable_types.go b/vendor/github.com/graph-gophers/graphql-go/nullable_types.go new file mode 100644 index 000000000..fa5bbfd62 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/nullable_types.go @@ -0,0 +1,166 @@ +package graphql + +import ( + "fmt" + "math" +) + +// NullString is a string that can be null. Use it in input structs to +// differentiate a value explicitly set to null from an omitted value. +// When the value is defined (either null or a value) Set is true. +type NullString struct { + Value *string + Set bool +} + +func (NullString) ImplementsGraphQLType(name string) bool { + return name == "String" +} + +func (s *NullString) UnmarshalGraphQL(input interface{}) error { + s.Set = true + + if input == nil { + return nil + } + + switch v := input.(type) { + case string: + s.Value = &v + return nil + default: + return fmt.Errorf("wrong type for String: %T", v) + } +} + +func (s *NullString) Nullable() {} + +// NullBool is a bool that can be null. Use it in input structs to +// differentiate a value explicitly set to null from an omitted value. +// When the value is defined (either null or a value) Set is true. 
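+// +// A rough usage sketch (hypothetical resolver args struct): with args struct{ Flag NullBool }, +// passing flag: null in the query yields Set == true and Value == nil, while omitting the +// argument leaves Set == false.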
+type NullBool struct { + Value *bool + Set bool +} + +func (NullBool) ImplementsGraphQLType(name string) bool { + return name == "Boolean" +} + +func (s *NullBool) UnmarshalGraphQL(input interface{}) error { + s.Set = true + + if input == nil { + return nil + } + + switch v := input.(type) { + case bool: + s.Value = &v + return nil + default: + return fmt.Errorf("wrong type for Boolean: %T", v) + } +} + +func (s *NullBool) Nullable() {} + +// NullInt is an int32 that can be null. Use it in input structs to +// differentiate a value explicitly set to null from an omitted value. +// When the value is defined (either null or a value) Set is true. +type NullInt struct { + Value *int32 + Set bool +} + +func (NullInt) ImplementsGraphQLType(name string) bool { + return name == "Int" +} + +func (s *NullInt) UnmarshalGraphQL(input interface{}) error { + s.Set = true + + if input == nil { + return nil + } + + switch v := input.(type) { + case int32: + s.Value = &v + return nil + case float64: + coerced := int32(v) + if v < math.MinInt32 || v > math.MaxInt32 || float64(coerced) != v { + return fmt.Errorf("not a 32-bit integer") + } + s.Value = &coerced + return nil + default: + return fmt.Errorf("wrong type for Int: %T", v) + } +} + +func (s *NullInt) Nullable() {} + +// NullFloat is a float64 that can be null. Use it in input structs to +// differentiate a value explicitly set to null from an omitted value. +// When the value is defined (either null or a value) Set is true. +type NullFloat struct { + Value *float64 + Set bool +} + +func (NullFloat) ImplementsGraphQLType(name string) bool { + return name == "Float" +} + +func (s *NullFloat) UnmarshalGraphQL(input interface{}) error { + s.Set = true + + if input == nil { + return nil + } + + switch v := input.(type) { + case float64: + s.Value = &v + return nil + case int32: + coerced := float64(v) + s.Value = &coerced + return nil + case int: + coerced := float64(v) + s.Value = &coerced + return nil + default: + return fmt.Errorf("wrong type for Float: %T", v) + } +} + +func (s *NullFloat) Nullable() {} + +// NullTime is a Time that can be null. Use it in input structs to +// differentiate a value explicitly set to null from an omitted value. +// When the value is defined (either null or a value) Set is true. 
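+// Non-null input is delegated to Time.UnmarshalGraphQL, so the same input formats +// (RFC3339 strings, time.Time values and Unix timestamps) are accepted.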
+type NullTime struct { + Value *Time + Set bool +} + +func (NullTime) ImplementsGraphQLType(name string) bool { + return name == "Time" +} + +func (s *NullTime) UnmarshalGraphQL(input interface{}) error { + s.Set = true + + if input == nil { + return nil + } + + s.Value = new(Time) + return s.Value.UnmarshalGraphQL(input) +} + +func (s *NullTime) Nullable() {} diff --git a/vendor/github.com/graph-gophers/graphql-go/relay/relay.go b/vendor/github.com/graph-gophers/graphql-go/relay/relay.go new file mode 100644 index 000000000..4768f5fd8 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/relay/relay.go @@ -0,0 +1,70 @@ +package relay + +import ( + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net/http" + "strings" + + graphql "github.com/graph-gophers/graphql-go" +) + +func MarshalID(kind string, spec interface{}) graphql.ID { + d, err := json.Marshal(spec) + if err != nil { + panic(fmt.Errorf("relay.MarshalID: %s", err)) + } + return graphql.ID(base64.URLEncoding.EncodeToString(append([]byte(kind+":"), d...))) +} + +func UnmarshalKind(id graphql.ID) string { + s, err := base64.URLEncoding.DecodeString(string(id)) + if err != nil { + return "" + } + i := strings.IndexByte(string(s), ':') + if i == -1 { + return "" + } + return string(s[:i]) +} + +func UnmarshalSpec(id graphql.ID, v interface{}) error { + s, err := base64.URLEncoding.DecodeString(string(id)) + if err != nil { + return err + } + i := strings.IndexByte(string(s), ':') + if i == -1 { + return errors.New("invalid graphql.ID") + } + return json.Unmarshal(s[i+1:], v) +} + +type Handler struct { + Schema *graphql.Schema +} + +func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + var params struct { + Query string `json:"query"` + OperationName string `json:"operationName"` + Variables map[string]interface{} `json:"variables"` + } + if err := json.NewDecoder(r.Body).Decode(&params); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + response := h.Schema.Exec(r.Context(), params.Query, params.OperationName, params.Variables) + responseJSON, err := json.Marshal(response) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + w.Write(responseJSON) +} diff --git a/vendor/github.com/graph-gophers/graphql-go/subscriptions.go b/vendor/github.com/graph-gophers/graphql-go/subscriptions.go new file mode 100644 index 000000000..34064dc77 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/subscriptions.go @@ -0,0 +1,96 @@ +package graphql + +import ( + "context" + "errors" + + qerrors "github.com/graph-gophers/graphql-go/errors" + "github.com/graph-gophers/graphql-go/internal/common" + "github.com/graph-gophers/graphql-go/internal/exec" + "github.com/graph-gophers/graphql-go/internal/exec/resolvable" + "github.com/graph-gophers/graphql-go/internal/exec/selected" + "github.com/graph-gophers/graphql-go/internal/query" + "github.com/graph-gophers/graphql-go/internal/validation" + "github.com/graph-gophers/graphql-go/introspection" +) + +// Subscribe returns a response channel for the given subscription with the schema's +// resolver. It returns an error if the schema was created without a resolver. +// If the context gets cancelled, the response channel will be closed and no +// further resolvers will be called. The context error will be returned as soon +// as possible (not immediately). 
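+// Each value received from the channel can be asserted to *Response; the channel is +// closed once the subscription ends.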
+func (s *Schema) Subscribe(ctx context.Context, queryString string, operationName string, variables map[string]interface{}) (<-chan interface{}, error) { + if !s.res.Resolver.IsValid() { + return nil, errors.New("schema created without resolver, can not subscribe") + } + if _, ok := s.schema.EntryPoints["subscription"]; !ok { + return nil, errors.New("no subscriptions are offered by the schema") + } + return s.subscribe(ctx, queryString, operationName, variables, s.res), nil +} + +func (s *Schema) subscribe(ctx context.Context, queryString string, operationName string, variables map[string]interface{}, res *resolvable.Schema) <-chan interface{} { + doc, qErr := query.Parse(queryString) + if qErr != nil { + return sendAndReturnClosed(&Response{Errors: []*qerrors.QueryError{qErr}}) + } + + validationFinish := s.validationTracer.TraceValidation(ctx) + errs := validation.Validate(s.schema, doc, variables, s.maxDepth) + validationFinish(errs) + if len(errs) != 0 { + return sendAndReturnClosed(&Response{Errors: errs}) + } + + op, err := getOperation(doc, operationName) + if err != nil { + return sendAndReturnClosed(&Response{Errors: []*qerrors.QueryError{qerrors.Errorf("%s", err)}}) + } + + r := &exec.Request{ + Request: selected.Request{ + Doc: doc, + Vars: variables, + Schema: s.schema, + }, + Limiter: make(chan struct{}, s.maxParallelism), + Tracer: s.tracer, + Logger: s.logger, + PanicHandler: s.panicHandler, + SubscribeResolverTimeout: s.subscribeResolverTimeout, + } + varTypes := make(map[string]*introspection.Type) + for _, v := range op.Vars { + t, err := common.ResolveType(v.Type, s.schema.Resolve) + if err != nil { + return sendAndReturnClosed(&Response{Errors: []*qerrors.QueryError{err}}) + } + varTypes[v.Name.Name] = introspection.WrapType(t) + } + + if op.Type == query.Query || op.Type == query.Mutation { + data, errs := r.Execute(ctx, res, op) + return sendAndReturnClosed(&Response{Data: data, Errors: errs}) + } + + responses := r.Subscribe(ctx, res, op) + c := make(chan interface{}) + go func() { + for resp := range responses { + c <- &Response{ + Data: resp.Data, + Errors: resp.Errors, + } + } + close(c) + }() + + return c +} + +func sendAndReturnClosed(resp *Response) chan interface{} { + c := make(chan interface{}, 1) + c <- resp + close(c) + return c +} diff --git a/vendor/github.com/graph-gophers/graphql-go/time.go b/vendor/github.com/graph-gophers/graphql-go/time.go new file mode 100644 index 000000000..974287e7b --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/time.go @@ -0,0 +1,64 @@ +package graphql + +import ( + "encoding/json" + "fmt" + "time" +) + +// Time is a custom GraphQL type to represent an instant in time. It has to be added to a schema +// via "scalar Time" since it is not a predeclared GraphQL type like "ID". +type Time struct { + time.Time +} + +// ImplementsGraphQLType maps this custom Go type +// to the graphql scalar type in the schema. 
+func (Time) ImplementsGraphQLType(name string) bool { + return name == "Time" +} + +// UnmarshalGraphQL is a custom unmarshaler for Time +// +// This function will be called whenever you use the +// time scalar as an input +func (t *Time) UnmarshalGraphQL(input interface{}) error { + switch input := input.(type) { + case time.Time: + t.Time = input + return nil + case string: + var err error + t.Time, err = time.Parse(time.RFC3339, input) + return err + case []byte: + var err error + t.Time, err = time.Parse(time.RFC3339, string(input)) + return err + case int32: + t.Time = time.Unix(int64(input), 0) + return nil + case int64: + if input >= 1e10 { + sec := input / 1e9 + nsec := input - (sec * 1e9) + t.Time = time.Unix(sec, nsec) + } else { + t.Time = time.Unix(input, 0) + } + return nil + case float64: + t.Time = time.Unix(int64(input), 0) + return nil + default: + return fmt.Errorf("wrong type for Time: %T", input) + } +} + +// MarshalJSON is a custom marshaler for Time +// +// This function will be called whenever you +// query for fields that use the Time type +func (t Time) MarshalJSON() ([]byte, error) { + return json.Marshal(t.Time) +} diff --git a/vendor/github.com/graph-gophers/graphql-go/trace/trace.go b/vendor/github.com/graph-gophers/graphql-go/trace/trace.go new file mode 100644 index 000000000..8d5d8a712 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/trace/trace.go @@ -0,0 +1,96 @@ +package trace + +import ( + "context" + "fmt" + + "github.com/graph-gophers/graphql-go/errors" + "github.com/graph-gophers/graphql-go/introspection" + opentracing "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/opentracing/opentracing-go/log" +) + +type TraceQueryFinishFunc func([]*errors.QueryError) +type TraceFieldFinishFunc func(*errors.QueryError) + +type Tracer interface { + TraceQuery(ctx context.Context, queryString string, operationName string, variables map[string]interface{}, varTypes map[string]*introspection.Type) (context.Context, TraceQueryFinishFunc) + TraceField(ctx context.Context, label, typeName, fieldName string, trivial bool, args map[string]interface{}) (context.Context, TraceFieldFinishFunc) +} + +type OpenTracingTracer struct{} + +func (OpenTracingTracer) TraceQuery(ctx context.Context, queryString string, operationName string, variables map[string]interface{}, varTypes map[string]*introspection.Type) (context.Context, TraceQueryFinishFunc) { + span, spanCtx := opentracing.StartSpanFromContext(ctx, "GraphQL request") + span.SetTag("graphql.query", queryString) + + if operationName != "" { + span.SetTag("graphql.operationName", operationName) + } + + if len(variables) != 0 { + span.LogFields(log.Object("graphql.variables", variables)) + } + + return spanCtx, func(errs []*errors.QueryError) { + if len(errs) > 0 { + msg := errs[0].Error() + if len(errs) > 1 { + msg += fmt.Sprintf(" (and %d more errors)", len(errs)-1) + } + ext.Error.Set(span, true) + span.SetTag("graphql.error", msg) + } + span.Finish() + } +} + +func (OpenTracingTracer) TraceField(ctx context.Context, label, typeName, fieldName string, trivial bool, args map[string]interface{}) (context.Context, TraceFieldFinishFunc) { + if trivial { + return ctx, noop + } + + span, spanCtx := opentracing.StartSpanFromContext(ctx, label) + span.SetTag("graphql.type", typeName) + span.SetTag("graphql.field", fieldName) + for name, value := range args { + span.SetTag("graphql.args."+name, value) + } + + return spanCtx, func(err *errors.QueryError) { + if err != 
nil { + ext.Error.Set(span, true) + span.SetTag("graphql.error", err.Error()) + } + span.Finish() + } +} + +func (OpenTracingTracer) TraceValidation(ctx context.Context) TraceValidationFinishFunc { + span, _ := opentracing.StartSpanFromContext(ctx, "Validate Query") + + return func(errs []*errors.QueryError) { + if len(errs) > 0 { + msg := errs[0].Error() + if len(errs) > 1 { + msg += fmt.Sprintf(" (and %d more errors)", len(errs)-1) + } + ext.Error.Set(span, true) + span.SetTag("graphql.error", msg) + } + span.Finish() + } +} + +func noop(*errors.QueryError) {} + +type NoopTracer struct{} + +func (NoopTracer) TraceQuery(ctx context.Context, queryString string, operationName string, variables map[string]interface{}, varTypes map[string]*introspection.Type) (context.Context, TraceQueryFinishFunc) { + return ctx, func(errs []*errors.QueryError) {} +} + +func (NoopTracer) TraceField(ctx context.Context, label, typeName, fieldName string, trivial bool, args map[string]interface{}) (context.Context, TraceFieldFinishFunc) { + return ctx, func(err *errors.QueryError) {} +} diff --git a/vendor/github.com/graph-gophers/graphql-go/trace/validation_trace.go b/vendor/github.com/graph-gophers/graphql-go/trace/validation_trace.go new file mode 100644 index 000000000..bce7a9a4f --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/trace/validation_trace.go @@ -0,0 +1,25 @@ +package trace + +import ( + "context" + + "github.com/graph-gophers/graphql-go/errors" +) + +type TraceValidationFinishFunc = TraceQueryFinishFunc + +// Deprecated: use ValidationTracerContext. +type ValidationTracer interface { + TraceValidation() TraceValidationFinishFunc +} + +type ValidationTracerContext interface { + TraceValidation(ctx context.Context) TraceValidationFinishFunc +} + +type NoopValidationTracer struct{} + +// Deprecated: use a Tracer which implements ValidationTracerContext. +func (NoopValidationTracer) TraceValidation() TraceValidationFinishFunc { + return func(errs []*errors.QueryError) {} +} diff --git a/vendor/github.com/graph-gophers/graphql-go/types/argument.go b/vendor/github.com/graph-gophers/graphql-go/types/argument.go new file mode 100644 index 000000000..b2681a284 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/argument.go @@ -0,0 +1,44 @@ +package types + +// Argument is a representation of the GraphQL Argument. +// +// https://spec.graphql.org/draft/#sec-Language.Arguments +type Argument struct { + Name Ident + Value Value +} + +// ArgumentList is a collection of GraphQL Arguments. +type ArgumentList []*Argument + +// Returns a Value in the ArgumentList by name. +func (l ArgumentList) Get(name string) (Value, bool) { + for _, arg := range l { + if arg.Name.Name == name { + return arg.Value, true + } + } + return nil, false +} + +// MustGet returns a Value in the ArgumentList by name. +// MustGet will panic if the argument name is not found in the ArgumentList. +func (l ArgumentList) MustGet(name string) Value { + value, ok := l.Get(name) + if !ok { + panic("argument not found") + } + return value +} + +type ArgumentsDefinition []*InputValueDefinition + +// Get returns an InputValueDefinition in the ArgumentsDefinition by name or nil if not found. 
+func (a ArgumentsDefinition) Get(name string) *InputValueDefinition { + for _, inputValue := range a { + if inputValue.Name.Name == name { + return inputValue + } + } + return nil +} diff --git a/vendor/github.com/graph-gophers/graphql-go/types/directive.go b/vendor/github.com/graph-gophers/graphql-go/types/directive.go new file mode 100644 index 000000000..0f8a4b993 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/directive.go @@ -0,0 +1,34 @@ +package types + +import "github.com/graph-gophers/graphql-go/errors" + +// Directive is a representation of the GraphQL Directive. +// +// http://spec.graphql.org/draft/#sec-Language.Directives +type Directive struct { + Name Ident + Arguments ArgumentList +} + +// DirectiveDefinition is a representation of the GraphQL DirectiveDefinition. +// +// http://spec.graphql.org/draft/#sec-Type-System.Directives +type DirectiveDefinition struct { + Name string + Desc string + Locations []string + Arguments ArgumentsDefinition + Loc errors.Location +} + +type DirectiveList []*Directive + +// Returns the Directive in the DirectiveList by name or nil if not found. +func (l DirectiveList) Get(name string) *Directive { + for _, d := range l { + if d.Name.Name == name { + return d + } + } + return nil +} diff --git a/vendor/github.com/graph-gophers/graphql-go/types/doc.go b/vendor/github.com/graph-gophers/graphql-go/types/doc.go new file mode 100644 index 000000000..87caa60b8 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/doc.go @@ -0,0 +1,9 @@ +/* + Package types represents all types from the GraphQL specification in code. + + + The names of the Go types, whenever possible, match 1:1 with the names from + the specification. + +*/ +package types diff --git a/vendor/github.com/graph-gophers/graphql-go/types/enum.go b/vendor/github.com/graph-gophers/graphql-go/types/enum.go new file mode 100644 index 000000000..b2c84caa4 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/enum.go @@ -0,0 +1,32 @@ +package types + +import "github.com/graph-gophers/graphql-go/errors" + +// EnumTypeDefinition defines a set of possible enum values. +// +// Like scalar types, an EnumTypeDefinition also represents a leaf value in a GraphQL type system. +// +// http://spec.graphql.org/draft/#sec-Enums +type EnumTypeDefinition struct { + Name string + EnumValuesDefinition []*EnumValueDefinition + Desc string + Directives DirectiveList + Loc errors.Location +} + +// EnumValueDefinition are unique values that may be serialized as a string: the name of the +// represented value. +// +// http://spec.graphql.org/draft/#EnumValueDefinition +type EnumValueDefinition struct { + EnumValue string + Directives DirectiveList + Desc string + Loc errors.Location +} + +func (*EnumTypeDefinition) Kind() string { return "ENUM" } +func (t *EnumTypeDefinition) String() string { return t.Name } +func (t *EnumTypeDefinition) TypeName() string { return t.Name } +func (t *EnumTypeDefinition) Description() string { return t.Desc } diff --git a/vendor/github.com/graph-gophers/graphql-go/types/extension.go b/vendor/github.com/graph-gophers/graphql-go/types/extension.go new file mode 100644 index 000000000..b82ea6705 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/extension.go @@ -0,0 +1,13 @@ +package types + +import "github.com/graph-gophers/graphql-go/errors" + +// Extension type defines a GraphQL type extension. +// Schemas, Objects, Inputs and Scalars can be extended. 
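+// (The schema parser earlier in this diff handles extend for schema, type, interface, +// union, enum and input; scalar extensions are still a TODO there.)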
+// +// https://spec.graphql.org/draft/#sec-Type-System-Extensions +type Extension struct { + Type NamedType + Directives DirectiveList + Loc errors.Location +} diff --git a/vendor/github.com/graph-gophers/graphql-go/types/field.go b/vendor/github.com/graph-gophers/graphql-go/types/field.go new file mode 100644 index 000000000..ea5bca5c8 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/field.go @@ -0,0 +1,39 @@ +package types + +import "github.com/graph-gophers/graphql-go/errors" + +// FieldDefinition is a representation of a GraphQL FieldDefinition. +// +// http://spec.graphql.org/draft/#FieldDefinition +type FieldDefinition struct { + Name string + Arguments ArgumentsDefinition + Type Type + Directives DirectiveList + Desc string + Loc errors.Location +} + +// FieldsDefinition is a list of an ObjectTypeDefinition's Fields. +// +// https://spec.graphql.org/draft/#FieldsDefinition +type FieldsDefinition []*FieldDefinition + +// Get returns a FieldDefinition in a FieldsDefinition by name or nil if not found. +func (l FieldsDefinition) Get(name string) *FieldDefinition { + for _, f := range l { + if f.Name == name { + return f + } + } + return nil +} + +// Names returns a slice of FieldDefinition names. +func (l FieldsDefinition) Names() []string { + names := make([]string, len(l)) + for i, f := range l { + names[i] = f.Name + } + return names +} diff --git a/vendor/github.com/graph-gophers/graphql-go/types/fragment.go b/vendor/github.com/graph-gophers/graphql-go/types/fragment.go new file mode 100644 index 000000000..606219cab --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/fragment.go @@ -0,0 +1,51 @@ +package types + +import "github.com/graph-gophers/graphql-go/errors" + +type Fragment struct { + On TypeName + Selections SelectionSet +} + +// InlineFragment is a representation of the GraphQL InlineFragment. +// +// http://spec.graphql.org/draft/#InlineFragment +type InlineFragment struct { + Fragment + Directives DirectiveList + Loc errors.Location +} + +// FragmentDefinition is a representation of the GraphQL FragmentDefinition. +// +// http://spec.graphql.org/draft/#FragmentDefinition +type FragmentDefinition struct { + Fragment + Name Ident + Directives DirectiveList + Loc errors.Location +} + +// FragmentSpread is a representation of the GraphQL FragmentSpread. +// +// http://spec.graphql.org/draft/#FragmentSpread +type FragmentSpread struct { + Name Ident + Directives DirectiveList + Loc errors.Location +} + +type FragmentList []*FragmentDefinition + +// Returns a FragmentDefinition by name or nil if not found. +func (l FragmentList) Get(name string) *FragmentDefinition { + for _, f := range l { + if f.Name.Name == name { + return f + } + } + return nil +} + +func (InlineFragment) isSelection() {} +func (FragmentSpread) isSelection() {} diff --git a/vendor/github.com/graph-gophers/graphql-go/types/input.go b/vendor/github.com/graph-gophers/graphql-go/types/input.go new file mode 100644 index 000000000..c179bc3e8 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/input.go @@ -0,0 +1,47 @@ +package types + +import "github.com/graph-gophers/graphql-go/errors" + +// InputValueDefinition is a representation of the GraphQL InputValueDefinition. 
+// +// http://spec.graphql.org/draft/#InputValueDefinition +type InputValueDefinition struct { + Name Ident + Type Type + Default Value + Desc string + Directives DirectiveList + Loc errors.Location + TypeLoc errors.Location +} + +type InputValueDefinitionList []*InputValueDefinition + +// Returns an InputValueDefinition by name or nil if not found. +func (l InputValueDefinitionList) Get(name string) *InputValueDefinition { + for _, v := range l { + if v.Name.Name == name { + return v + } + } + return nil +} + +// InputObject types define a set of input fields; the input fields are either scalars, enums, or +// other input objects. +// +// This allows arguments to accept arbitrarily complex structs. +// +// http://spec.graphql.org/draft/#sec-Input-Objects +type InputObject struct { + Name string + Desc string + Values ArgumentsDefinition + Directives DirectiveList + Loc errors.Location +} + +func (*InputObject) Kind() string { return "INPUT_OBJECT" } +func (t *InputObject) String() string { return t.Name } +func (t *InputObject) TypeName() string { return t.Name } +func (t *InputObject) Description() string { return t.Desc } diff --git a/vendor/github.com/graph-gophers/graphql-go/types/interface.go b/vendor/github.com/graph-gophers/graphql-go/types/interface.go new file mode 100644 index 000000000..e741e5915 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/interface.go @@ -0,0 +1,25 @@ +package types + +import "github.com/graph-gophers/graphql-go/errors" + +// InterfaceTypeDefinition recusrively defines list of named fields with their arguments via the +// implementation chain of interfaces. +// +// GraphQL objects can then implement these interfaces which requires that the object type will +// define all fields defined by those interfaces. +// +// http://spec.graphql.org/draft/#sec-Interfaces +type InterfaceTypeDefinition struct { + Name string + PossibleTypes []*ObjectTypeDefinition + Fields FieldsDefinition + Desc string + Directives DirectiveList + Loc errors.Location + Interfaces []*InterfaceTypeDefinition +} + +func (*InterfaceTypeDefinition) Kind() string { return "INTERFACE" } +func (t *InterfaceTypeDefinition) String() string { return t.Name } +func (t *InterfaceTypeDefinition) TypeName() string { return t.Name } +func (t *InterfaceTypeDefinition) Description() string { return t.Desc } diff --git a/vendor/github.com/graph-gophers/graphql-go/types/object.go b/vendor/github.com/graph-gophers/graphql-go/types/object.go new file mode 100644 index 000000000..e65c79db8 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/object.go @@ -0,0 +1,25 @@ +package types + +import "github.com/graph-gophers/graphql-go/errors" + +// ObjectTypeDefinition represents a GraphQL ObjectTypeDefinition. 
+// +// type FooObject { +// foo: String +// } +// +// https://spec.graphql.org/draft/#sec-Objects +type ObjectTypeDefinition struct { + Name string + Interfaces []*InterfaceTypeDefinition + Fields FieldsDefinition + Desc string + Directives DirectiveList + InterfaceNames []string + Loc errors.Location +} + +func (*ObjectTypeDefinition) Kind() string { return "OBJECT" } +func (t *ObjectTypeDefinition) String() string { return t.Name } +func (t *ObjectTypeDefinition) TypeName() string { return t.Name } +func (t *ObjectTypeDefinition) Description() string { return t.Desc } diff --git a/vendor/github.com/graph-gophers/graphql-go/types/query.go b/vendor/github.com/graph-gophers/graphql-go/types/query.go new file mode 100644 index 000000000..caca6ef46 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/query.go @@ -0,0 +1,62 @@ +package types + +import "github.com/graph-gophers/graphql-go/errors" + +// ExecutableDefinition represents a set of operations or fragments that can be executed +// against a schema. +// +// http://spec.graphql.org/draft/#ExecutableDefinition +type ExecutableDefinition struct { + Operations OperationList + Fragments FragmentList +} + +// OperationDefinition represents a GraphQL Operation. +// +// https://spec.graphql.org/draft/#sec-Language.Operations +type OperationDefinition struct { + Type OperationType + Name Ident + Vars ArgumentsDefinition + Selections SelectionSet + Directives DirectiveList + Loc errors.Location +} + +type OperationType string + +// A Selection is a field requested in a GraphQL operation. +// +// http://spec.graphql.org/draft/#Selection +type Selection interface { + isSelection() +} + +// A SelectionSet represents a collection of Selections +// +// http://spec.graphql.org/draft/#sec-Selection-Sets +type SelectionSet []Selection + +// Field represents a field used in a query. +type Field struct { + Alias Ident + Name Ident + Arguments ArgumentList + Directives DirectiveList + SelectionSet SelectionSet + SelectionSetLoc errors.Location +} + +func (Field) isSelection() {} + +type OperationList []*OperationDefinition + +// Get returns an OperationDefinition by name or nil if not found. +func (l OperationList) Get(name string) *OperationDefinition { + for _, f := range l { + if f.Name.Name == name { + return f + } + } + return nil +} diff --git a/vendor/github.com/graph-gophers/graphql-go/types/scalar.go b/vendor/github.com/graph-gophers/graphql-go/types/scalar.go new file mode 100644 index 000000000..5bd529a84 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/scalar.go @@ -0,0 +1,22 @@ +package types + +import "github.com/graph-gophers/graphql-go/errors" + +// ScalarTypeDefinition types represent primitive leaf values (e.g. a string or an integer) in a GraphQL type +// system. +// +// GraphQL responses take the form of a hierarchical tree; the leaves on these trees are GraphQL +// scalars. 
+// +// http://spec.graphql.org/draft/#sec-Scalars +type ScalarTypeDefinition struct { + Name string + Desc string + Directives DirectiveList + Loc errors.Location +} + +func (*ScalarTypeDefinition) Kind() string { return "SCALAR" } +func (t *ScalarTypeDefinition) String() string { return t.Name } +func (t *ScalarTypeDefinition) TypeName() string { return t.Name } +func (t *ScalarTypeDefinition) Description() string { return t.Desc } diff --git a/vendor/github.com/graph-gophers/graphql-go/types/schema.go b/vendor/github.com/graph-gophers/graphql-go/types/schema.go new file mode 100644 index 000000000..06811a97f --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/schema.go @@ -0,0 +1,42 @@ +package types + +// Schema represents a GraphQL service's collective type system capabilities. +// A schema is defined in terms of the types and directives it supports as well as the root +// operation types for each kind of operation: `query`, `mutation`, and `subscription`. +// +// For a more formal definition, read the relevant section in the specification: +// +// http://spec.graphql.org/draft/#sec-Schema +type Schema struct { + // EntryPoints determines the place in the type system where `query`, `mutation`, and + // `subscription` operations begin. + // + // http://spec.graphql.org/draft/#sec-Root-Operation-Types + // + EntryPoints map[string]NamedType + + // Types are the fundamental unit of any GraphQL schema. + // There are six kinds of named types, and two wrapping types. + // + // http://spec.graphql.org/draft/#sec-Types + Types map[string]NamedType + + // Directives are used to annotate various parts of a GraphQL document as an indicator that they + // should be evaluated differently by a validator, executor, or client tool such as a code + // generator. + // + // http://spec.graphql.org/#sec-Type-System.Directives + Directives map[string]*DirectiveDefinition + + UseFieldResolvers bool + + EntryPointNames map[string]string + Objects []*ObjectTypeDefinition + Unions []*Union + Enums []*EnumTypeDefinition + Extensions []*Extension +} + +func (s *Schema) Resolve(name string) Type { + return s.Types[name] +} diff --git a/vendor/github.com/graph-gophers/graphql-go/types/types.go b/vendor/github.com/graph-gophers/graphql-go/types/types.go new file mode 100644 index 000000000..df34d08ab --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/types.go @@ -0,0 +1,63 @@ +package types + +import ( + "github.com/graph-gophers/graphql-go/errors" +) + +// TypeName is a base building block for GraphQL type references. +type TypeName struct { + Ident +} + +// NamedType represents a type with a name. +// +// http://spec.graphql.org/draft/#NamedType +type NamedType interface { + Type + TypeName() string + Description() string +} + +type Ident struct { + Name string + Loc errors.Location +} + +type Type interface { + // Kind returns one possible GraphQL type kind. A type kind must be + // valid as defined by the GraphQL spec. + // + // https://spec.graphql.org/draft/#sec-Type-Kinds + Kind() string + + // String serializes a Type into a GraphQL specification format type. + // + // http://spec.graphql.org/draft/#sec-Serialization-Format + String() string +} + +// List represents a GraphQL ListType. +// +// http://spec.graphql.org/draft/#ListType +type List struct { + // OfType represents the inner-type of a List type. + // For example, the List type `[Foo]` has an OfType of Foo. + OfType Type +} + +// NonNull represents a GraphQL NonNullType. 
+// +// https://spec.graphql.org/draft/#NonNullType +type NonNull struct { + // OfType represents the inner-type of a NonNull type. + // For example, the NonNull type `Foo!` has an OfType of Foo. + OfType Type +} + +func (*List) Kind() string { return "LIST" } +func (*NonNull) Kind() string { return "NON_NULL" } +func (*TypeName) Kind() string { panic("TypeName needs to be resolved to actual type") } + +func (t *List) String() string { return "[" + t.OfType.String() + "]" } +func (t *NonNull) String() string { return t.OfType.String() + "!" } +func (*TypeName) String() string { panic("TypeName needs to be resolved to actual type") } diff --git a/vendor/github.com/graph-gophers/graphql-go/types/union.go b/vendor/github.com/graph-gophers/graphql-go/types/union.go new file mode 100644 index 000000000..bb9166731 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/union.go @@ -0,0 +1,24 @@ +package types + +import "github.com/graph-gophers/graphql-go/errors" + +// Union types represent objects that could be one of a list of GraphQL object types, but provides no +// guaranteed fields between those types. +// +// They also differ from interfaces in that object types declare what interfaces they implement, but +// are not aware of what unions contain them. +// +// http://spec.graphql.org/draft/#sec-Unions +type Union struct { + Name string + UnionMemberTypes []*ObjectTypeDefinition + Desc string + Directives DirectiveList + TypeNames []string + Loc errors.Location +} + +func (*Union) Kind() string { return "UNION" } +func (t *Union) String() string { return t.Name } +func (t *Union) TypeName() string { return t.Name } +func (t *Union) Description() string { return t.Desc } diff --git a/vendor/github.com/graph-gophers/graphql-go/types/value.go b/vendor/github.com/graph-gophers/graphql-go/types/value.go new file mode 100644 index 000000000..9f8d041a6 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/value.go @@ -0,0 +1,141 @@ +package types + +import ( + "strconv" + "strings" + "text/scanner" + + "github.com/graph-gophers/graphql-go/errors" +) + +// Value represents a literal input or literal default value in the GraphQL Specification. +// +// http://spec.graphql.org/draft/#sec-Input-Values +type Value interface { + // Deserialize transforms a GraphQL specification format literal into a Go type. + Deserialize(vars map[string]interface{}) interface{} + + // String serializes a Value into a GraphQL specification format literal. 
+ String() string + Location() errors.Location +} + +// PrimitiveValue represents one of the following GraphQL scalars: Int, Float, +// String, or Boolean +type PrimitiveValue struct { + Type rune + Text string + Loc errors.Location +} + +func (val *PrimitiveValue) Deserialize(vars map[string]interface{}) interface{} { + switch val.Type { + case scanner.Int: + value, err := strconv.ParseInt(val.Text, 10, 32) + if err != nil { + panic(err) + } + return int32(value) + + case scanner.Float: + value, err := strconv.ParseFloat(val.Text, 64) + if err != nil { + panic(err) + } + return value + + case scanner.String: + value, err := strconv.Unquote(val.Text) + if err != nil { + panic(err) + } + return value + + case scanner.Ident: + switch val.Text { + case "true": + return true + case "false": + return false + default: + return val.Text + } + + default: + panic("invalid literal value") + } +} + +func (val *PrimitiveValue) String() string { return val.Text } +func (val *PrimitiveValue) Location() errors.Location { return val.Loc } + +// ListValue represents a literal list Value in the GraphQL specification. +// +// http://spec.graphql.org/draft/#sec-List-Value +type ListValue struct { + Values []Value + Loc errors.Location +} + +func (val *ListValue) Deserialize(vars map[string]interface{}) interface{} { + entries := make([]interface{}, len(val.Values)) + for i, entry := range val.Values { + entries[i] = entry.Deserialize(vars) + } + return entries +} + +func (val *ListValue) String() string { + entries := make([]string, len(val.Values)) + for i, entry := range val.Values { + entries[i] = entry.String() + } + return "[" + strings.Join(entries, ", ") + "]" +} + +func (val *ListValue) Location() errors.Location { return val.Loc } + +// ObjectValue represents a literal object Value in the GraphQL specification. +// +// http://spec.graphql.org/draft/#sec-Object-Value +type ObjectValue struct { + Fields []*ObjectField + Loc errors.Location +} + +// ObjectField represents field/value pairs in a literal ObjectValue. +type ObjectField struct { + Name Ident + Value Value +} + +func (val *ObjectValue) Deserialize(vars map[string]interface{}) interface{} { + fields := make(map[string]interface{}, len(val.Fields)) + for _, f := range val.Fields { + fields[f.Name.Name] = f.Value.Deserialize(vars) + } + return fields +} + +func (val *ObjectValue) String() string { + entries := make([]string, 0, len(val.Fields)) + for _, f := range val.Fields { + entries = append(entries, f.Name.Name+": "+f.Value.String()) + } + return "{" + strings.Join(entries, ", ") + "}" +} + +func (val *ObjectValue) Location() errors.Location { + return val.Loc +} + +// NullValue represents a literal `null` Value in the GraphQL specification. +// +// http://spec.graphql.org/draft/#sec-Null-Value +type NullValue struct { + Loc errors.Location +} + +func (val *NullValue) Deserialize(vars map[string]interface{}) interface{} { return nil } +func (val *NullValue) String() string { return "null" } +func (val *NullValue) Location() errors.Location { return val.Loc } diff --git a/vendor/github.com/graph-gophers/graphql-go/types/variable.go b/vendor/github.com/graph-gophers/graphql-go/types/variable.go new file mode 100644 index 000000000..1a4e2a515 --- /dev/null +++ b/vendor/github.com/graph-gophers/graphql-go/types/variable.go @@ -0,0 +1,15 @@ +package types + +import "github.com/graph-gophers/graphql-go/errors" + +// Variable is used in GraphQL operations to parameterize an input value. 
+// +// http://spec.graphql.org/draft/#Variable +type Variable struct { + Name string + Loc errors.Location +} + +func (v Variable) Deserialize(vars map[string]interface{}) interface{} { return vars[v.Name] } +func (v Variable) String() string { return "$" + v.Name } +func (v *Variable) Location() errors.Location { return v.Loc } diff --git a/vendor/github.com/hashicorp/go-version/.travis.yml b/vendor/github.com/hashicorp/go-version/.travis.yml deleted file mode 100644 index 542ca8b7f..000000000 --- a/vendor/github.com/hashicorp/go-version/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - 1.0 - - 1.1 - - 1.2 - - 1.3 - - 1.4 - - 1.9 - - "1.10" - -script: - - go test diff --git a/vendor/github.com/hashicorp/go-version/CHANGELOG.md b/vendor/github.com/hashicorp/go-version/CHANGELOG.md new file mode 100644 index 000000000..dbae7f7be --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/CHANGELOG.md @@ -0,0 +1,25 @@ +# 1.3.0 (March 31, 2021) + +Please note that CHANGELOG.md does not exist in the source code prior to this release. + +FEATURES: + - Add `Core` function to return a version without prerelease or metadata ([#85](https://github.com/hashicorp/go-version/pull/85)) + +# 1.2.1 (June 17, 2020) + +BUG FIXES: + - Prevent `Version.Equal` method from panicking on `nil` encounter ([#73](https://github.com/hashicorp/go-version/pull/73)) + +# 1.2.0 (April 23, 2019) + +FEATURES: + - Add `GreaterThanOrEqual` and `LessThanOrEqual` helper methods ([#53](https://github.com/hashicorp/go-version/pull/53)) + +# 1.1.0 (Jan 07, 2019) + +FEATURES: + - Add `NewSemver` constructor ([#45](https://github.com/hashicorp/go-version/pull/45)) + +# 1.0.0 (August 24, 2018) + +Initial release. diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md index 6f3a15ce7..851a337be 100644 --- a/vendor/github.com/hashicorp/go-version/README.md +++ b/vendor/github.com/hashicorp/go-version/README.md @@ -1,5 +1,6 @@ # Versioning Library for Go -[![Build Status](https://travis-ci.org/hashicorp/go-version.svg?branch=master)](https://travis-ci.org/hashicorp/go-version) +[![Build Status](https://circleci.com/gh/hashicorp/go-version/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-version/tree/master) +[![GoDoc](https://godoc.org/github.com/hashicorp/go-version?status.svg)](https://godoc.org/github.com/hashicorp/go-version) go-version is a library for parsing versions and version constraints, and verifying versions against a set of constraints. go-version diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go index 4d1e6e221..8068834ec 100644 --- a/vendor/github.com/hashicorp/go-version/version.go +++ b/vendor/github.com/hashicorp/go-version/version.go @@ -10,14 +10,25 @@ import ( ) // The compiled regular expression used to test the validity of a version. -var versionRegexp *regexp.Regexp +var ( + versionRegexp *regexp.Regexp + semverRegexp *regexp.Regexp +) // The raw regular expression string used for testing the validity // of a version. 
-const VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + - `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + - `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + - `?` +const ( + VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + + `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + + `?` + + // SemverRegexpRaw requires a separator between version and prerelease + SemverRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + + `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + + `?` +) // Version represents a single version. type Version struct { @@ -30,12 +41,24 @@ type Version struct { func init() { versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$") + semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$") } // NewVersion parses the given version and returns a new // Version. func NewVersion(v string) (*Version, error) { - matches := versionRegexp.FindStringSubmatch(v) + return newVersion(v, versionRegexp) +} + +// NewSemver parses the given version and returns a new +// Version that adheres strictly to SemVer specs +// https://semver.org/ +func NewSemver(v string) (*Version, error) { + return newVersion(v, semverRegexp) +} + +func newVersion(v string, pattern *regexp.Regexp) (*Version, error) { + matches := pattern.FindStringSubmatch(v) if matches == nil { return nil, fmt.Errorf("Malformed version: %s", v) } @@ -89,7 +112,7 @@ func Must(v *Version, err error) *Version { // or larger than the other version, respectively. // // If you want boolean results, use the LessThan, Equal, -// or GreaterThan methods. +// GreaterThan, GreaterThanOrEqual or LessThanOrEqual methods. func (v *Version) Compare(other *Version) int { // A quick, efficient equality check if v.String() == other.String() { @@ -255,8 +278,20 @@ func comparePrereleases(v string, other string) int { return 0 } +// Core returns a new version constructed from only the MAJOR.MINOR.PATCH +// segments of the version, without prerelease or metadata. +func (v *Version) Core() *Version { + segments := v.Segments64() + segmentsOnly := fmt.Sprintf("%d.%d.%d", segments[0], segments[1], segments[2]) + return Must(NewVersion(segmentsOnly)) +} + // Equal tests if two versions are equal. func (v *Version) Equal(o *Version) bool { + if v == nil || o == nil { + return v == o + } + return v.Compare(o) == 0 } @@ -265,11 +300,21 @@ func (v *Version) GreaterThan(o *Version) bool { return v.Compare(o) > 0 } +// GreaterThanOrEqual tests if this version is greater than or equal to another version. +func (v *Version) GreaterThanOrEqual(o *Version) bool { + return v.Compare(o) >= 0 +} + // LessThan tests if this version is less than another version. func (v *Version) LessThan(o *Version) bool { return v.Compare(o) < 0 } +// LessThanOrEqual tests if this version is less than or equal to another version. +func (v *Version) LessThanOrEqual(o *Version) bool { + return v.Compare(o) <= 0 +} + // Metadata returns any metadata that was part of the version // string. 
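The go-version hunks above add `NewSemver`, `Core`, `GreaterThanOrEqual`/`LessThanOrEqual`, and a nil guard in `Equal`. A minimal sketch of how these additions behave, written only against the public API of `github.com/hashicorp/go-version`; the version strings are arbitrary examples, not anything used by this repository:

```Go
package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

func main() {
	// NewSemver requires the "-" separator before a prerelease label.
	v := version.Must(version.NewSemver("1.2.3-beta+meta"))
	o := version.Must(version.NewVersion("1.2.3"))

	// Core strips prerelease and build metadata, keeping MAJOR.MINOR.PATCH.
	fmt.Println(v.Core()) // 1.2.3

	// New comparison helpers; a prerelease sorts before its release.
	fmt.Println(v.LessThanOrEqual(o))    // true
	fmt.Println(o.GreaterThanOrEqual(v)) // true

	// Equal now tolerates a nil operand instead of panicking.
	fmt.Println(v.Equal(nil)) // false
}
```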
// diff --git a/vendor/github.com/hashicorp/logutils/go.mod b/vendor/github.com/hashicorp/logutils/go.mod new file mode 100644 index 000000000..ba38a4576 --- /dev/null +++ b/vendor/github.com/hashicorp/logutils/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/logutils diff --git a/vendor/github.com/hasura/go-graphql-client/LICENSE b/vendor/github.com/hasura/go-graphql-client/LICENSE new file mode 100644 index 000000000..91cac9f03 --- /dev/null +++ b/vendor/github.com/hasura/go-graphql-client/LICENSE @@ -0,0 +1,22 @@ +MIT License + +Copyright (c) 2017 Dmitri Shuralyov +Copyright (c) 2020 Hasura + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/hasura/go-graphql-client/README.md b/vendor/github.com/hasura/go-graphql-client/README.md new file mode 100644 index 000000000..96b88370c --- /dev/null +++ b/vendor/github.com/hasura/go-graphql-client/README.md @@ -0,0 +1,687 @@ +go-graphql-client +======= + +[![Build Status](https://travis-ci.org/hasura/go-graphql-client.svg?branch=master)](https://travis-ci.org/hasura/go-graphql-client.svg?branch=master) [![GoDoc](https://godoc.org/github.com/hasura/go-graphql-client?status.svg)](https://pkg.go.dev/github.com/hasura/go-graphql-client) + +**Preface:** This is a fork of `https://github.com/shurcooL/graphql` with extended features (subscription client, named operation) + +The subscription client follows Apollo client specification https://github.com/apollographql/subscriptions-transport-ws/blob/master/PROTOCOL.md, using websocket protocol with https://github.com/nhooyr/websocket, a minimal and idiomatic WebSocket library for Go. + +Package `graphql` provides a GraphQL client implementation. + +For more information, see package [`github.com/shurcooL/githubv4`](https://github.com/shurcooL/githubv4), which is a specialized version targeting GitHub GraphQL API v4. That package is driving the feature development. + +**Status:** In active early research and development. The API will change when opportunities for improvement are discovered; it is not yet frozen. 
+ +- [go-graphql-client](#go-graphql-client) + - [Installation](#installation) + - [Usage](#usage) + - [Authentication](#authentication) + - [Simple Query](#simple-query) + - [Arguments and Variables](#arguments-and-variables) + - [Custom scalar tag](#custom-scalar-tag) + - [Inline Fragments](#inline-fragments) + - [Mutations](#mutations) + - [Mutations Without Fields](#mutations-without-fields) + - [Subscription](#subscription) + - [Usage](#usage-1) + - [Subscribe](#subscribe) + - [Authentication](#authentication-1) + - [Options](#options) + - [Events](#events) + - [Custom WebSocket client](#custom-websocket-client) + - [Options](#options-1) + - [With operation name (deprecated)](#with-operation-name-deprecated) + - [Raw bytes response](#raw-bytes-response) + - [Multiple mutations with ordered map](#multiple-mutations-with-ordered-map) + - [Directories](#directories) + - [References](#references) + - [License](#license) + +## Installation + +`go-graphql-client` requires Go version 1.13 or later. + +```bash +go get -u github.com/hasura/go-graphql-client +``` + +## Usage + +Construct a GraphQL client, specifying the GraphQL server URL. Then, you can use it to make GraphQL queries and mutations. + +```Go +client := graphql.NewClient("https://example.com/graphql", nil) +// Use client... +``` + +### Authentication + +Some GraphQL servers may require authentication. The `graphql` package does not directly handle authentication. Instead, when creating a new client, you're expected to pass an `http.Client` that performs authentication. The easiest and recommended way to do this is to use the [`golang.org/x/oauth2`](https://golang.org/x/oauth2) package. You'll need an OAuth token with the right scopes. Then: + +```Go +import "golang.org/x/oauth2" + +func main() { + src := oauth2.StaticTokenSource( + &oauth2.Token{AccessToken: os.Getenv("GRAPHQL_TOKEN")}, + ) + httpClient := oauth2.NewClient(context.Background(), src) + + client := graphql.NewClient("https://example.com/graphql", httpClient) + // Use client... +``` + +### Simple Query + +To make a GraphQL query, you need to define a corresponding Go type. + +For example, to make the following GraphQL query: + +```GraphQL +query { + me { + name + } +} +``` + +You can define this variable: + +```Go +var query struct { + Me struct { + Name graphql.String + } +} +``` + +Then call `client.Query`, passing a pointer to it: + +```Go +err := client.Query(context.Background(), &query, nil) +if err != nil { + // Handle error. +} +fmt.Println(query.Me.Name) + +// Output: Luke Skywalker +``` + +### Arguments and Variables + +Often, you'll want to specify arguments on some fields. You can use the `graphql` struct field tag for this. + +For example, to make the following GraphQL query: + +```GraphQL +{ + human(id: "1000") { + name + height(unit: METER) + } +} +``` + +You can define this variable: + +```Go +var q struct { + Human struct { + Name graphql.String + Height graphql.Float `graphql:"height(unit: METER)"` + } `graphql:"human(id: \"1000\")"` +} +``` + +Then call `client.Query`: + +```Go +err := client.Query(context.Background(), &q, nil) +if err != nil { + // Handle error. +} +fmt.Println(q.Human.Name) +fmt.Println(q.Human.Height) + +// Output: +// Luke Skywalker +// 1.72 +``` + +However, that'll only work if the arguments are constant and known in advance. Otherwise, you will need to make use of variables. 
Replace the constants in the struct field tag with variable names: + +```Go +var q struct { + Human struct { + Name graphql.String + Height graphql.Float `graphql:"height(unit: $unit)"` + } `graphql:"human(id: $id)"` +} +``` + +Then, define a `variables` map with their values: + +```Go +variables := map[string]interface{}{ + "id": graphql.ID(id), + "unit": starwars.LengthUnit("METER"), +} +``` + +Finally, call `client.Query` providing `variables`: + +```Go +err := client.Query(context.Background(), &q, variables) +if err != nil { + // Handle error. +} +``` + +### Custom scalar tag + +Because the generator reflects recursively struct objects, it can't know if the struct is a custom scalar such as JSON. To avoid expansion of the field during query generation, let's add the tag `scalar:"true"` to the custom scalar. If the scalar implements the JSON decoder interface, it will be automatically decoded. + +```Go +struct { + Viewer struct { + ID interface{} + Login string + CreatedAt time.Time + DatabaseID int + } +} + +// Output: +// { +// viewer { +// id +// login +// createdAt +// databaseId +// } +// } + +struct { + Viewer struct { + ID interface{} + Login string + CreatedAt time.Time + DatabaseID int + } `scalar:"true"` +} + +// Output +// { viewer } +``` + +### Inline Fragments + +Some GraphQL queries contain inline fragments. You can use the `graphql` struct field tag to express them. + +For example, to make the following GraphQL query: + +```GraphQL +{ + hero(episode: "JEDI") { + name + ... on Droid { + primaryFunction + } + ... on Human { + height + } + } +} +``` + +You can define this variable: + +```Go +var q struct { + Hero struct { + Name graphql.String + Droid struct { + PrimaryFunction graphql.String + } `graphql:"... on Droid"` + Human struct { + Height graphql.Float + } `graphql:"... on Human"` + } `graphql:"hero(episode: \"JEDI\")"` +} +``` + +Alternatively, you can define the struct types corresponding to inline fragments, and use them as embedded fields in your query: + +```Go +type ( + DroidFragment struct { + PrimaryFunction graphql.String + } + HumanFragment struct { + Height graphql.Float + } +) + +var q struct { + Hero struct { + Name graphql.String + DroidFragment `graphql:"... on Droid"` + HumanFragment `graphql:"... on Human"` + } `graphql:"hero(episode: \"JEDI\")"` +} +``` + +Then call `client.Query`: + +```Go +err := client.Query(context.Background(), &q, nil) +if err != nil { + // Handle error. +} +fmt.Println(q.Hero.Name) +fmt.Println(q.Hero.PrimaryFunction) +fmt.Println(q.Hero.Height) + +// Output: +// R2-D2 +// Astromech +// 0 +``` + +### Mutations + +Mutations often require information that you can only find out by performing a query first. Let's suppose you've already done that. + +For example, to make the following GraphQL mutation: + +```GraphQL +mutation($ep: Episode!, $review: ReviewInput!) { + createReview(episode: $ep, review: $review) { + stars + commentary + } +} +variables { + "ep": "JEDI", + "review": { + "stars": 5, + "commentary": "This is a great movie!" 
+ } +} +``` + +You can define: + +```Go +var m struct { + CreateReview struct { + Stars graphql.Int + Commentary graphql.String + } `graphql:"createReview(episode: $ep, review: $review)"` +} +variables := map[string]interface{}{ + "ep": starwars.Episode("JEDI"), + "review": starwars.ReviewInput{ + Stars: graphql.Int(5), + Commentary: graphql.String("This is a great movie!"), + }, +} +``` + +Then call `client.Mutate`: + +```Go +err := client.Mutate(context.Background(), &m, variables) +if err != nil { + // Handle error. +} +fmt.Printf("Created a %v star review: %v\n", m.CreateReview.Stars, m.CreateReview.Commentary) + +// Output: +// Created a 5 star review: This is a great movie! +``` + +#### Mutations Without Fields + +Sometimes, you don't need any fields returned from a mutation. Doing that is easy. + +For example, to make the following GraphQL mutation: + +```GraphQL +mutation($ep: Episode!, $review: ReviewInput!) { + createReview(episode: $ep, review: $review) +} +variables { + "ep": "JEDI", + "review": { + "stars": 5, + "commentary": "This is a great movie!" + } +} +``` + +You can define: + +```Go +var m struct { + CreateReview string `graphql:"createReview(episode: $ep, review: $review)"` +} +variables := map[string]interface{}{ + "ep": starwars.Episode("JEDI"), + "review": starwars.ReviewInput{ + Stars: graphql.Int(5), + Commentary: graphql.String("This is a great movie!"), + }, +} +``` + +Then call `client.Mutate`: + +```Go +err := client.Mutate(context.Background(), &m, variables) +if err != nil { + // Handle error. +} +fmt.Printf("Created a review: %s.\n", m.CreateReview) + +// Output: +// Created a review: . +``` + +### Subscription + +#### Usage + +Construct a Subscription client, specifying the GraphQL server URL. + +```Go +client := graphql.NewSubscriptionClient("wss://example.com/graphql") +defer client.Close() + +// Subscribe subscriptions +// ... +// finally run the client +client.Run() +``` + +#### Subscribe + +To make a GraphQL subscription, you need to define a corresponding Go type. + +For example, to make the following GraphQL query: + +```GraphQL +subscription { + me { + name + } +} +``` + +You can define this variable: + +```Go +var subscription struct { + Me struct { + Name graphql.String + } +} +``` + +Then call `client.Subscribe`, passing a pointer to it: + +```Go +subscriptionId, err := client.Subscribe(&query, nil, func(dataValue *json.RawMessage, errValue error) error { + if errValue != nil { + // handle error + // if returns error, it will failback to `onError` event + return nil + } + data := query{} + err := json.Unmarshal(dataValue, &data) + + fmt.Println(query.Me.Name) + + // Output: Luke Skywalker +}) + +if err != nil { + // Handle error. +} + +// you can unsubscribe the subscription while the client is running +client.Unsubscribe(subscriptionId) +``` + +#### Authentication + +The subscription client is authenticated with GraphQL server through connection params: + +```Go +client := graphql.NewSubscriptionClient("wss://example.com/graphql"). + WithConnectionParams(map[string]interface{}{ + "headers": map[string]string{ + "authentication": "...", + }, + }) + +``` + +#### Options + +```Go +client. + // write timeout of websocket client + WithTimeout(time.Minute). + // When the websocket server was stopped, the client will retry connecting every second until timeout + WithRetryTimeout(time.Minute). + // sets loging function to print out received messages. By default, nothing is printed + WithLog(log.Println). 
+ // max size of response message + WithReadLimit(10*1024*1024). + // these operation event logs won't be printed + WithoutLogTypes(graphql.GQL_DATA, graphql.GQL_CONNECTION_KEEP_ALIVE) + +``` + +#### Events + +```Go +// OnConnected event is triggered when the websocket connected to GraphQL server sucessfully +client.OnConnected(fn func()) + +// OnDisconnected event is triggered when the websocket server was stil down after retry timeout +client.OnDisconnected(fn func()) + +// OnConnected event is triggered when there is any connection error. This is bottom exception handler level +// If this function is empty, or returns nil, the error is ignored +// If returns error, the websocket connection will be terminated +client.OnError(onError func(sc *SubscriptionClient, err error) error) +``` + +#### Custom WebSocket client + +By default the subscription client uses [nhooyr WebSocket client](https://github.com/nhooyr/websocket). If you need to customize the client, or prefer using [Gorilla WebSocket](https://github.com/gorilla/websocket), let's follow the Websocket interface and replace the constructor with `WithWebSocket` method: + +```go +// WebsocketHandler abstracts WebSocket connection functions +// ReadJSON and WriteJSON data of a frame from the WebSocket connection. +// Close the WebSocket connection. +type WebsocketConn interface { + ReadJSON(v interface{}) error + WriteJSON(v interface{}) error + Close() error + // SetReadLimit sets the maximum size in bytes for a message read from the peer. If a + // message exceeds the limit, the connection sends a close message to the peer + // and returns ErrReadLimit to the application. + SetReadLimit(limit int64) +} + +// WithWebSocket replaces customized websocket client constructor +func (sc *SubscriptionClient) WithWebSocket(fn func(sc *SubscriptionClient) (WebsocketConn, error)) *SubscriptionClient +``` + +**Example** + +```Go + +// the default websocket constructor +func newWebsocketConn(sc *SubscriptionClient) (WebsocketConn, error) { + options := &websocket.DialOptions{ + Subprotocols: []string{"graphql-ws"}, + } + c, _, err := websocket.Dial(sc.GetContext(), sc.GetURL(), options) + if err != nil { + return nil, err + } + + // The default WebsocketHandler implementation using nhooyr's + return &WebsocketHandler{ + ctx: sc.GetContext(), + Conn: c, + timeout: sc.GetTimeout(), + }, nil +} + +client := graphql.NewSubscriptionClient("wss://example.com/graphql") +defer client.Close() + +client.WithWebSocket(newWebsocketConn) + +client.Run() +``` + +### Options + +There are extensible parts in the GraphQL query that we sometimes use. They are optional so that we shouldn't required them in the method. To make it flexible, we can abstract these options as optional arguments that follow this interface. + +```go +type Option interface { + Type() OptionType + String() string +} + +client.Query(ctx context.Context, q interface{}, variables map[string]interface{}, options ...Option) error +``` + +Currently we support 2 option types: `operation_name` and `operation_directive`. The operation name option is built-in because it is unique. We can use the option directly with `OperationName` + +```go +// query MyQuery { +// ... +// } +client.Query(ctx, &q, variables, graphql.OperationName("MyQuery")) +``` + +In contrast, operation directive is various and customizable on different GraphQL servers. There isn't any built-in directive in the library. You need to define yourself. 
For example: + +```go +// define @cached directive for Hasura queries +// https://hasura.io/docs/latest/graphql/cloud/response-caching.html#enable-caching +type cachedDirective struct { + ttl int +} + +func (cd cachedDirective) Type() OptionType { + // operation_directive + return graphql.OptionTypeOperationDirective +} + +func (cd cachedDirective) String() string { + if cd.ttl <= 0 { + return "@cached" + } + return fmt.Sprintf("@cached(ttl: %d)", cd.ttl) +} + +// query MyQuery @cached { +// ... +// } +client.Query(ctx, &q, variables, graphql.OperationName("MyQuery"), cachedDirective{}) +``` + +### With operation name (deprecated) + +Operation name is still on API decision plan https://github.com/shurcooL/graphql/issues/12. However, in my opinion separate methods are easier choice to avoid breaking changes + +```Go +func (c *Client) NamedQuery(ctx context.Context, name string, q interface{}, variables map[string]interface{}) error + +func (c *Client) NamedMutate(ctx context.Context, name string, q interface{}, variables map[string]interface{}) error + +func (sc *SubscriptionClient) NamedSubscribe(name string, v interface{}, variables map[string]interface{}, handler func(message *json.RawMessage, err error) error) (string, error) +``` + +### Raw bytes response + +In the case we developers want to decode JSON response ourself. Moreover, the default `UnmarshalGraphQL` function isn't ideal with complicated nested interfaces + +```Go +func (c *Client) QueryRaw(ctx context.Context, q interface{}, variables map[string]interface{}) (*json.RawMessage, error) + +func (c *Client) MutateRaw(ctx context.Context, q interface{}, variables map[string]interface{}) (*json.RawMessage, error) + +func (c *Client) NamedQueryRaw(ctx context.Context, name string, q interface{}, variables map[string]interface{}) (*json.RawMessage, error) + +func (c *Client) NamedMutateRaw(ctx context.Context, name string, q interface{}, variables map[string]interface{}) (*json.RawMessage, error) +``` + +### Multiple mutations with ordered map + +You might need to make multiple mutations in single query. It's not very convenient with structs +so you can use ordered map `[][2]interface{}` instead. + +For example, to make the following GraphQL mutation: + +```GraphQL +mutation($login1: String!, $login2: String!, $login3: String!) { + createUser(login: $login1) { login } + createUser(login: $login2) { login } + createUser(login: $login3) { login } +} +variables { + "login1": "grihabor", + "login2": "diman", + "login3": "indigo" +} +``` + +You can define: + +```Go +type CreateUser struct { + Login graphql.String +} +m := [][2]interface{}{ + {"createUser(login: $login1)", &CreateUser{}}, + {"createUser(login: $login2)", &CreateUser{}}, + {"createUser(login: $login3)", &CreateUser{}}, +} +variables := map[string]interface{}{ + "login1": graphql.String("grihabor"), + "login2": graphql.String("diman"), + "login3": graphql.String("indigo"), +} +``` + +Directories +----------- + +| Path | Synopsis | +|----------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------| +| [example/graphqldev](https://godoc.org/github.com/shurcooL/graphql/example/graphqldev) | graphqldev is a test program currently being used for developing graphql package. 
| +| [ident](https://godoc.org/github.com/shurcooL/graphql/ident) | Package ident provides functions for parsing and converting identifier names between various naming convention. | +| [internal/jsonutil](https://godoc.org/github.com/shurcooL/graphql/internal/jsonutil) | Package jsonutil provides a function for decoding JSON into a GraphQL query data structure. | + +References +---------- +- https://github.com/shurcooL/graphql +- https://github.com/apollographql/subscriptions-transport-ws/blob/master/PROTOCOL.md +- https://github.com/nhooyr/websocket + + +License +------- + +- [MIT License](LICENSE) diff --git a/vendor/github.com/hasura/go-graphql-client/doc.go b/vendor/github.com/hasura/go-graphql-client/doc.go new file mode 100644 index 000000000..69ec4e038 --- /dev/null +++ b/vendor/github.com/hasura/go-graphql-client/doc.go @@ -0,0 +1,11 @@ +// Package graphql provides a GraphQL client implementation. +// +// For more information, see package github.com/shurcooL/githubv4, +// which is a specialized version targeting GitHub GraphQL API v4. +// That package is driving the feature development. +// +// Status: In active early research and development. The API will change when +// opportunities for improvement are discovered; it is not yet frozen. +// +// For now, see README for more details. +package graphql // import "github.com/shurcooL/graphql" diff --git a/vendor/github.com/hasura/go-graphql-client/go.mod b/vendor/github.com/hasura/go-graphql-client/go.mod new file mode 100644 index 000000000..3491aa5dc --- /dev/null +++ b/vendor/github.com/hasura/go-graphql-client/go.mod @@ -0,0 +1,9 @@ +module github.com/hasura/go-graphql-client + +go 1.14 + +require ( + github.com/google/uuid v1.3.0 + github.com/graph-gophers/graphql-go v1.2.0 + nhooyr.io/websocket v1.8.7 +) diff --git a/vendor/github.com/hasura/go-graphql-client/go.sum b/vendor/github.com/hasura/go-graphql-client/go.sum new file mode 100644 index 000000000..195d0d255 --- /dev/null +++ b/vendor/github.com/hasura/go-graphql-client/go.sum @@ -0,0 +1,71 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14= +github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY= +github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= 
+github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/graph-gophers/graphql-go v1.2.0 h1:j3tCG0UcE+3f84OAw/4/6YQKyTr+r0yuUKtnxiu5OH4= +github.com/graph-gophers/graphql-go v1.2.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= +github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/klauspost/compress v1.10.3 h1:OP96hzwJVBIHYU52pVTI6CczrxPvrGfgqF9N5eTO0Q8= +github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= +github.com/ugorji/go v1.1.7/go.mod 
h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= +nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= diff --git a/vendor/github.com/hasura/go-graphql-client/graphql.go b/vendor/github.com/hasura/go-graphql-client/graphql.go new file mode 100644 index 000000000..2bfea639e --- /dev/null +++ b/vendor/github.com/hasura/go-graphql-client/graphql.go @@ -0,0 +1,280 @@ +package graphql + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + + "github.com/hasura/go-graphql-client/internal/jsonutil" +) + +// This function allows you to tweak the HTTP request. It might be useful to set authentication +// headers amongst other things +type RequestModifier func(*http.Request) + +// Client is a GraphQL client. +type Client struct { + url string // GraphQL server URL. + httpClient *http.Client + requestModifier RequestModifier +} + +// NewClient creates a GraphQL client targeting the specified GraphQL server URL. +// If httpClient is nil, then http.DefaultClient is used. +func NewClient(url string, httpClient *http.Client) *Client { + if httpClient == nil { + httpClient = http.DefaultClient + } + return &Client{ + url: url, + httpClient: httpClient, + requestModifier: nil, + } +} + +// Query executes a single GraphQL query request, +// with a query derived from q, populating the response into it. +// q should be a pointer to struct that corresponds to the GraphQL schema. +func (c *Client) Query(ctx context.Context, q interface{}, variables map[string]interface{}, options ...Option) error { + return c.do(ctx, queryOperation, q, variables, options...) +} + +// NamedQuery executes a single GraphQL query request, with operation name +// +// Deprecated: this is the shortcut of Query method, with NewOperationName option +func (c *Client) NamedQuery(ctx context.Context, name string, q interface{}, variables map[string]interface{}, options ...Option) error { + return c.do(ctx, queryOperation, q, variables, append(options, OperationName(name))...) 
+} + +// Mutate executes a single GraphQL mutation request, +// with a mutation derived from m, populating the response into it. +// m should be a pointer to struct that corresponds to the GraphQL schema. +func (c *Client) Mutate(ctx context.Context, m interface{}, variables map[string]interface{}, options ...Option) error { + return c.do(ctx, mutationOperation, m, variables, options...) +} + +// NamedMutate executes a single GraphQL mutation request, with operation name +// +// Deprecated: this is the shortcut of Mutate method, with NewOperationName option +func (c *Client) NamedMutate(ctx context.Context, name string, m interface{}, variables map[string]interface{}, options ...Option) error { + return c.do(ctx, mutationOperation, m, variables, append(options, OperationName(name))...) +} + +// Query executes a single GraphQL query request, +// with a query derived from q, populating the response into it. +// q should be a pointer to struct that corresponds to the GraphQL schema. +// return raw bytes message. +func (c *Client) QueryRaw(ctx context.Context, q interface{}, variables map[string]interface{}, options ...Option) (*json.RawMessage, error) { + return c.doRaw(ctx, queryOperation, q, variables, options...) +} + +// NamedQueryRaw executes a single GraphQL query request, with operation name +// return raw bytes message. +func (c *Client) NamedQueryRaw(ctx context.Context, name string, q interface{}, variables map[string]interface{}, options ...Option) (*json.RawMessage, error) { + return c.doRaw(ctx, queryOperation, q, variables, append(options, OperationName(name))...) +} + +// MutateRaw executes a single GraphQL mutation request, +// with a mutation derived from m, populating the response into it. +// m should be a pointer to struct that corresponds to the GraphQL schema. +// return raw bytes message. +func (c *Client) MutateRaw(ctx context.Context, m interface{}, variables map[string]interface{}, options ...Option) (*json.RawMessage, error) { + return c.doRaw(ctx, mutationOperation, m, variables, options...) +} + +// NamedMutateRaw executes a single GraphQL mutation request, with operation name +// return raw bytes message. +func (c *Client) NamedMutateRaw(ctx context.Context, name string, m interface{}, variables map[string]interface{}, options ...Option) (*json.RawMessage, error) { + return c.doRaw(ctx, mutationOperation, m, variables, append(options, OperationName(name))...) +} + +// do executes a single GraphQL operation. +// return raw message and error +func (c *Client) doRaw(ctx context.Context, op operationType, v interface{}, variables map[string]interface{}, options ...Option) (*json.RawMessage, error) { + var query string + var err error + switch op { + case queryOperation: + query, err = constructQuery(v, variables, options...) + case mutationOperation: + query, err = constructMutation(v, variables, options...) 
+ } + + if err != nil { + return nil, err + } + + in := struct { + Query string `json:"query"` + Variables map[string]interface{} `json:"variables,omitempty"` + }{ + Query: query, + Variables: variables, + } + var buf bytes.Buffer + err = json.NewEncoder(&buf).Encode(in) + if err != nil { + return nil, err + } + + request, err := http.NewRequestWithContext(ctx, http.MethodPost, c.url, &buf) + if err != nil { + return nil, fmt.Errorf("problem constructing request: %w", err) + } + request.Header.Add("Content-Type", "application/json") + + if c.requestModifier != nil { + c.requestModifier(request) + } + + resp, err := c.httpClient.Do(request) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + body, _ := ioutil.ReadAll(resp.Body) + return nil, fmt.Errorf("non-200 OK status code: %v body: %q", resp.Status, body) + } + var out struct { + Data *json.RawMessage + Errors Errors + } + err = json.NewDecoder(resp.Body).Decode(&out) + if err != nil { + // TODO: Consider including response body in returned error, if deemed helpful. + return nil, err + } + + if len(out.Errors) > 0 { + return out.Data, out.Errors + } + + return out.Data, nil +} + +// do executes a single GraphQL operation and unmarshal json. +func (c *Client) do(ctx context.Context, op operationType, v interface{}, variables map[string]interface{}, options ...Option) error { + var query string + var err error + switch op { + case queryOperation: + query, err = constructQuery(v, variables, options...) + case mutationOperation: + query, err = constructMutation(v, variables, options...) + } + + if err != nil { + return err + } + + in := struct { + Query string `json:"query"` + Variables map[string]interface{} `json:"variables,omitempty"` + }{ + Query: query, + Variables: variables, + } + var buf bytes.Buffer + err = json.NewEncoder(&buf).Encode(in) + if err != nil { + return err + } + + request, err := http.NewRequestWithContext(ctx, http.MethodPost, c.url, &buf) + if err != nil { + return fmt.Errorf("problem constructing request: %w", err) + } + request.Header.Add("Content-Type", "application/json") + + if c.requestModifier != nil { + c.requestModifier(request) + } + + resp, err := c.httpClient.Do(request) + if err != nil { + return err + } + defer resp.Body.Close() + + r := resp.Body + if resp.Header.Get("Content-Encoding") == "gzip" { + r, err = gzip.NewReader(r) + if err != nil { + return fmt.Errorf("problem trying to create gzip reader: %w", err) + } + defer r.Close() + } + + if resp.StatusCode != http.StatusOK { + body, _ := ioutil.ReadAll(r) + return fmt.Errorf("non-200 OK status code: %v body: %q", resp.Status, body) + } + var out struct { + Data *json.RawMessage + Errors Errors + } + + err = json.NewDecoder(r).Decode(&out) + if err != nil { + // TODO: Consider including response body in returned error, if deemed helpful. + return err + } + if out.Data != nil { + err := jsonutil.UnmarshalGraphQL(*out.Data, v) + if err != nil { + // TODO: Consider including response body in returned error, if deemed helpful. + return err + } + } + if len(out.Errors) > 0 { + return out.Errors + } + return nil +} + +// Returns a copy of the client with the request modifier set. This allows you to reuse the same +// TCP connection for multiple slghtly different requests to the same server +// (i.e. 
different authentication headers for multitenant applications) +func (c *Client) WithRequestModifier(f RequestModifier) *Client { + return &Client{ + url: c.url, + httpClient: c.httpClient, + requestModifier: f, + } +} + +// errors represents the "errors" array in a response from a GraphQL server. +// If returned via error interface, the slice is expected to contain at least 1 element. +// +// Specification: https://facebook.github.io/graphql/#sec-Errors. +type Errors []struct { + Message string + Extensions map[string]interface{} + Locations []struct { + Line int + Column int + } +} + +// Error implements error interface. +func (e Errors) Error() string { + b := strings.Builder{} + for _, err := range e { + b.WriteString(fmt.Sprintf("Message: %s, Locations: %+v", err.Message, err.Locations)) + } + return b.String() +} + +type operationType uint8 + +const ( + queryOperation operationType = iota + mutationOperation + // subscriptionOperation // Unused. +) diff --git a/vendor/github.com/hasura/go-graphql-client/ident/ident.go b/vendor/github.com/hasura/go-graphql-client/ident/ident.go new file mode 100644 index 000000000..29e498ed9 --- /dev/null +++ b/vendor/github.com/hasura/go-graphql-client/ident/ident.go @@ -0,0 +1,240 @@ +// Package ident provides functions for parsing and converting identifier names +// between various naming convention. It has support for MixedCaps, lowerCamelCase, +// and SCREAMING_SNAKE_CASE naming conventions. +package ident + +import ( + "strings" + "unicode" + "unicode/utf8" +) + +// ParseMixedCaps parses a MixedCaps identifier name. +// +// E.g., "ClientMutationID" -> {"Client", "Mutation", "ID"}. +func ParseMixedCaps(name string) Name { + var words Name + + // Split name at any lower -> Upper or Upper -> Upper,lower transitions. + // Check each word for initialisms. + runes := []rune(name) + w, i := 0, 0 // Index of start of word, scan. + for i+1 <= len(runes) { + eow := false // Whether we hit the end of a word. + if i+1 == len(runes) { + eow = true + } else if unicode.IsLower(runes[i]) && unicode.IsUpper(runes[i+1]) { + // lower -> Upper. + eow = true + } else if i+2 < len(runes) && unicode.IsUpper(runes[i]) && unicode.IsUpper(runes[i+1]) && unicode.IsLower(runes[i+2]) { + // Upper -> Upper,lower. End of acronym, followed by a word. + eow = true + + if string(runes[i:i+3]) == "IDs" { // Special case, plural form of ID initialism. + eow = false + } + } + i++ + if !eow { + continue + } + + // [w, i) is a word. + word := string(runes[w:i]) + if initialism, ok := isInitialism(word); ok { + words = append(words, initialism) + } else if i1, i2, ok := isTwoInitialisms(word); ok { + words = append(words, i1, i2) + } else { + words = append(words, word) + } + w = i + } + return words +} + +// ParseLowerCamelCase parses a lowerCamelCase identifier name. +// +// E.g., "clientMutationId" -> {"client", "Mutation", "Id"}. +func ParseLowerCamelCase(name string) Name { + var words Name + + // Split name at any Upper letters. + runes := []rune(name) + w, i := 0, 0 // Index of start of word, scan. + for i+1 <= len(runes) { + eow := false // Whether we hit the end of a word. + if i+1 == len(runes) { + eow = true + } else if unicode.IsUpper(runes[i+1]) { + // Upper letter. + eow = true + } + i++ + if !eow { + continue + } + + // [w, i) is a word. + words = append(words, string(runes[w:i])) + w = i + } + return words +} + +// ParseScreamingSnakeCase parses a SCREAMING_SNAKE_CASE identifier name. +// +// E.g., "CLIENT_MUTATION_ID" -> {"CLIENT", "MUTATION", "ID"}. 
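As a quick illustration of the parsing functions above together with the Name conversion methods defined just below, this sketch shows a round trip between MixedCaps and lowerCamelCase (the printed values follow the documented examples):

```go
package main

import (
	"fmt"

	"github.com/hasura/go-graphql-client/ident"
)

func main() {
	// MixedCaps -> lowerCamelCase, keeping initialisms such as "ID" intact.
	fmt.Println(ident.ParseMixedCaps("ClientMutationID").ToLowerCamelCase()) // clientMutationId

	// lowerCamelCase -> MixedCaps.
	fmt.Println(ident.ParseLowerCamelCase("clientMutationId").ToMixedCaps()) // ClientMutationID
}
```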
+func ParseScreamingSnakeCase(name string) Name { + var words Name + + // Split name at '_' characters. + runes := []rune(name) + w, i := 0, 0 // Index of start of word, scan. + for i+1 <= len(runes) { + eow := false // Whether we hit the end of a word. + if i+1 == len(runes) { + eow = true + } else if runes[i+1] == '_' { + // Underscore. + eow = true + } + i++ + if !eow { + continue + } + + // [w, i) is a word. + words = append(words, string(runes[w:i])) + if i < len(runes) && runes[i] == '_' { + // Skip underscore. + i++ + } + w = i + } + return words +} + +// Name is an identifier name, broken up into individual words. +type Name []string + +// ToMixedCaps expresses identifer name in MixedCaps naming convention. +// +// E.g., "ClientMutationID". +func (n Name) ToMixedCaps() string { + for i, word := range n { + if strings.EqualFold(word, "IDs") { // Special case, plural form of ID initialism. + n[i] = "IDs" + continue + } + if initialism, ok := isInitialism(word); ok { + n[i] = initialism + continue + } + if brand, ok := isBrand(word); ok { + n[i] = brand + continue + } + r, size := utf8.DecodeRuneInString(word) + n[i] = string(unicode.ToUpper(r)) + strings.ToLower(word[size:]) + } + return strings.Join(n, "") +} + +// ToLowerCamelCase expresses identifer name in lowerCamelCase naming convention. +// +// E.g., "clientMutationId". +func (n Name) ToLowerCamelCase() string { + for i, word := range n { + if i == 0 { + n[i] = strings.ToLower(word) + continue + } + r, size := utf8.DecodeRuneInString(word) + n[i] = string(unicode.ToUpper(r)) + strings.ToLower(word[size:]) + } + return strings.Join(n, "") +} + +// isInitialism reports whether word is an initialism. +func isInitialism(word string) (string, bool) { + initialism := strings.ToUpper(word) + _, ok := initialisms[initialism] + return initialism, ok +} + +// isTwoInitialisms reports whether word is two initialisms. +func isTwoInitialisms(word string) (string, string, bool) { + word = strings.ToUpper(word) + for i := 2; i <= len(word)-2; i++ { // Shortest initialism is 2 characters long. + _, ok1 := initialisms[word[:i]] + _, ok2 := initialisms[word[i:]] + if ok1 && ok2 { + return word[:i], word[i:], true + } + } + return "", "", false +} + +// initialisms is the set of initialisms in the MixedCaps naming convention. +// Only add entries that are highly unlikely to be non-initialisms. +// For instance, "ID" is fine (Freudian code is rare), but "AND" is not. +var initialisms = map[string]struct{}{ + // These are the common initialisms from golint. Keep them in sync + // with https://gotools.org/github.com/golang/lint#commonInitialisms. + "ACL": {}, + "API": {}, + "ASCII": {}, + "CPU": {}, + "CSS": {}, + "DNS": {}, + "EOF": {}, + "GUID": {}, + "HTML": {}, + "HTTP": {}, + "HTTPS": {}, + "ID": {}, + "IP": {}, + "JSON": {}, + "LHS": {}, + "QPS": {}, + "RAM": {}, + "RHS": {}, + "RPC": {}, + "SLA": {}, + "SMTP": {}, + "SQL": {}, + "SSH": {}, + "TCP": {}, + "TLS": {}, + "TTL": {}, + "UDP": {}, + "UI": {}, + "UID": {}, + "UUID": {}, + "URI": {}, + "URL": {}, + "UTF8": {}, + "VM": {}, + "XML": {}, + "XMPP": {}, + "XSRF": {}, + "XSS": {}, + + // Additional common initialisms. + "RSS": {}, +} + +// isBrand reports whether word is a brand. +func isBrand(word string) (string, bool) { + brand, ok := brands[strings.ToLower(word)] + return brand, ok +} + +// brands is the map of brands in the MixedCaps naming convention; +// see https://dmitri.shuralyov.com/idiomatic-go#for-brands-or-words-with-more-than-1-capital-letter-lowercase-all-letters. 
+// Key is the lower case version of the brand, value is the canonical brand spelling. +// Only add entries that are highly unlikely to be non-brands. +var brands = map[string]string{ + "github": "GitHub", +} diff --git a/vendor/github.com/hasura/go-graphql-client/internal/jsonutil/graphql.go b/vendor/github.com/hasura/go-graphql-client/internal/jsonutil/graphql.go new file mode 100644 index 000000000..69ff527de --- /dev/null +++ b/vendor/github.com/hasura/go-graphql-client/internal/jsonutil/graphql.go @@ -0,0 +1,467 @@ +// Package jsonutil provides a function for decoding JSON +// into a GraphQL query data structure. +package jsonutil + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "reflect" + "strconv" + "strings" +) + +// UnmarshalGraphQL parses the JSON-encoded GraphQL response data and stores +// the result in the GraphQL query data structure pointed to by v. +// +// The implementation is created on top of the JSON tokenizer available +// in "encoding/json".Decoder. +func UnmarshalGraphQL(data []byte, v interface{}) error { + dec := json.NewDecoder(bytes.NewReader(data)) + dec.UseNumber() + err := (&decoder{tokenizer: dec}).Decode(v) + if err != nil { + return err + } + tok, err := dec.Token() + switch err { + case io.EOF: + // Expect to get io.EOF. There shouldn't be any more + // tokens left after we've decoded v successfully. + return nil + case nil: + return fmt.Errorf("invalid token '%v' after top-level value", tok) + default: + return err + } +} + +// decoder is a JSON decoder that performs custom unmarshaling behavior +// for GraphQL query data structures. It's implemented on top of a JSON tokenizer. +type decoder struct { + tokenizer interface { + Token() (json.Token, error) + Decode(v interface{}) error + } + + // Stack of what part of input JSON we're in the middle of - objects, arrays. + parseState []json.Delim + + // Stacks of values where to unmarshal. + // The top of each stack is the reflect.Value where to unmarshal next JSON value. + // + // The reason there's more than one stack is because we might be unmarshaling + // a single JSON value into multiple GraphQL fragments or embedded structs, so + // we keep track of them all. + vs []stack +} + +type stack []reflect.Value + +func (s stack) Top() reflect.Value { + return s[len(s)-1] +} + +func (s stack) Pop() stack { + return s[:len(s)-1] +} + +// Decode decodes a single JSON value from d.tokenizer into v. +func (d *decoder) Decode(v interface{}) error { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return fmt.Errorf("cannot decode into non-pointer %T", v) + } + d.vs = []stack{{rv.Elem()}} + return d.decode() +} + +// decode decodes a single JSON value from d.tokenizer into d.vs. +func (d *decoder) decode() error { + rawMessageValue := reflect.ValueOf(json.RawMessage{}) + + // The loop invariant is that the top of each d.vs stack + // is where we try to unmarshal the next JSON value we see. + for len(d.vs) > 0 { + var tok interface{} + tok, err := d.tokenizer.Token() + + if err == io.EOF { + return errors.New("unexpected end of JSON input") + } else if err != nil { + return err + } + + switch { + + // Are we inside an object and seeing next key (rather than end of object)? 
+ case d.state() == '{' && tok != json.Delim('}'): + key, ok := tok.(string) + if !ok { + return errors.New("unexpected non-key in JSON input") + } + someFieldExist := false + // If one field is raw all must be treated as raw + rawMessage := false + isScalar := false + for i := range d.vs { + v := d.vs[i].Top() + for v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface { + v = v.Elem() + } + var f reflect.Value + switch v.Kind() { + case reflect.Struct: + f, isScalar = fieldByGraphQLName(v, key) + if f.IsValid() { + someFieldExist = true + // Check for special embedded json + if f.Type() == rawMessageValue.Type() { + rawMessage = true + } + } + case reflect.Slice: + f = orderedMapValueByGraphQLName(v, key) + if f.IsValid() { + someFieldExist = true + } + } + d.vs[i] = append(d.vs[i], f) + } + if !someFieldExist { + return fmt.Errorf("struct field for %q doesn't exist in any of %v places to unmarshal", key, len(d.vs)) + } + + if rawMessage || isScalar { + // Read the next complete object from the json stream + var data json.RawMessage + err = d.tokenizer.Decode(&data) + if err != nil { + return err + } + tok = data + } else { + // We've just consumed the current token, which was the key. + // Read the next token, which should be the value, and let the rest of code process it. + tok, err = d.tokenizer.Token() + if err == io.EOF { + return errors.New("unexpected end of JSON input") + } else if err != nil { + return err + } + } + + // Are we inside an array and seeing next value (rather than end of array)? + case d.state() == '[' && tok != json.Delim(']'): + someSliceExist := false + for i := range d.vs { + v := d.vs[i].Top() + for v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface { + v = v.Elem() + } + var f reflect.Value + if v.Kind() == reflect.Slice { + // we want to append the template item copy + // so that all the inner structure gets preserved + copied, err := copyTemplate(v.Index(0)) + if err != nil { + return fmt.Errorf("failed to copy template: %w", err) + } + v.Set(reflect.Append(v, copied)) // v = append(v, T). + f = v.Index(v.Len() - 1) + someSliceExist = true + } + d.vs[i] = append(d.vs[i], f) + } + if !someSliceExist { + return fmt.Errorf("slice doesn't exist in any of %v places to unmarshal", len(d.vs)) + } + } + + switch tok := tok.(type) { + case string, json.Number, bool, nil, json.RawMessage: + // Value. + + for i := range d.vs { + v := d.vs[i].Top() + if !v.IsValid() { + continue + } + err := unmarshalValue(tok, v) + if err != nil { + return err + } + } + d.popAllVs() + + case json.Delim: + switch tok { + case '{': + // Start of object. + + d.pushState(tok) + + frontier := make([]reflect.Value, len(d.vs)) // Places to look for GraphQL fragments/embedded structs. + for i := range d.vs { + v := d.vs[i].Top() + frontier[i] = v + // TODO: Do this recursively or not? Add a test case if needed. + if v.Kind() == reflect.Ptr && v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) // v = new(T). + } + } + // Find GraphQL fragments/embedded structs recursively, adding to frontier + // as new ones are discovered and exploring them further. + for len(frontier) > 0 { + v := frontier[0] + frontier = frontier[1:] + for v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface { + v = v.Elem() + } + if v.Kind() == reflect.Struct { + for i := 0; i < v.NumField(); i++ { + if isGraphQLFragment(v.Type().Field(i)) || v.Type().Field(i).Anonymous { + // Add GraphQL fragment or embedded struct. 
+ d.vs = append(d.vs, []reflect.Value{v.Field(i)}) + frontier = append(frontier, v.Field(i)) + } + } + } else if isOrderedMap(v) { + for i := 0; i < v.Len(); i++ { + pair := v.Index(i) + key, val := pair.Index(0), pair.Index(1) + if keyForGraphQLFragment(key.Interface().(string)) { + // Add GraphQL fragment or embedded struct. + d.vs = append(d.vs, []reflect.Value{val}) + frontier = append(frontier, val) + } + } + } + } + case '[': + // Start of array. + + d.pushState(tok) + + for i := range d.vs { + v := d.vs[i].Top() + // TODO: Confirm this is needed, write a test case. + //if v.Kind() == reflect.Ptr && v.IsNil() { + // v.Set(reflect.New(v.Type().Elem())) // v = new(T). + //} + + // Reset slice to empty (in case it had non-zero initial value). + for v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface { + v = v.Elem() + } + if v.Kind() != reflect.Slice { + continue + } + newSlice := reflect.MakeSlice(v.Type(), 0, 0) // v = make(T, 0, 0). + switch v.Len() { + case 0: + // if there is no template we need to create one so that we can + // handle both cases (with or without a template) in the same way + newSlice = reflect.Append(newSlice, reflect.Zero(v.Type().Elem())) + case 1: + // if there is a template, we need to keep it at index 0 + newSlice = reflect.Append(newSlice, v.Index(0)) + case 2: + return fmt.Errorf("template slice can only have 1 item, got %d", v.Len()) + } + v.Set(newSlice) + } + case '}': + // End of object. + d.popAllVs() + d.popState() + case ']': + // End of array. + d.popLeftArrayTemplates() + d.popAllVs() + d.popState() + default: + return errors.New("unexpected delimiter in JSON input") + } + default: + return errors.New("unexpected token in JSON input") + } + } + return nil +} + +func copyTemplate(template reflect.Value) (reflect.Value, error) { + if isOrderedMap(template) { + // copy slice if it's actually an ordered map + return copyOrderedMap(template), nil + } + if template.Kind() == reflect.Map { + return reflect.Value{}, fmt.Errorf("unsupported template type `%v`, use [][2]interface{} for ordered map instead", template.Type()) + } + // don't need to copy regular slice + return template, nil +} + +func isOrderedMap(v reflect.Value) bool { + if !v.IsValid() { + return false + } + t := v.Type() + return t.Kind() == reflect.Slice && + t.Elem().Kind() == reflect.Array && + t.Elem().Len() == 2 +} + +func copyOrderedMap(m reflect.Value) reflect.Value { + newMap := reflect.MakeSlice(m.Type(), 0, m.Len()) + for i := 0; i < m.Len(); i++ { + pair := m.Index(i) + newMap = reflect.Append(newMap, pair) + } + return newMap +} + +// pushState pushes a new parse state s onto the stack. +func (d *decoder) pushState(s json.Delim) { + d.parseState = append(d.parseState, s) +} + +// popState pops a parse state (already obtained) off the stack. +// The stack must be non-empty. +func (d *decoder) popState() { + d.parseState = d.parseState[:len(d.parseState)-1] +} + +// state reports the parse state on top of stack, or 0 if empty. +func (d *decoder) state() json.Delim { + if len(d.parseState) == 0 { + return 0 + } + return d.parseState[len(d.parseState)-1] +} + +// popAllVs pops from all d.vs stacks, keeping only non-empty ones. +func (d *decoder) popAllVs() { + var nonEmpty []stack + for i := range d.vs { + d.vs[i] = d.vs[i].Pop() + if len(d.vs[i]) > 0 { + nonEmpty = append(nonEmpty, d.vs[i]) + } + } + d.vs = nonEmpty +} + +// popLeftArrayTemplates pops left from last array items of all d.vs stacks. 
+func (d *decoder) popLeftArrayTemplates() { + for i := range d.vs { + v := d.vs[i].Top() + if v.IsValid() { + v.Set(v.Slice(1, v.Len())) + } + } +} + +// fieldByGraphQLName returns an exported struct field of struct v +// that matches GraphQL name, or invalid reflect.Value if none found. +func fieldByGraphQLName(v reflect.Value, name string) (val reflect.Value, taggedAsScalar bool) { + for i := 0; i < v.NumField(); i++ { + if v.Type().Field(i).PkgPath != "" { + // Skip unexported field. + continue + } + if hasGraphQLName(v.Type().Field(i), name) { + return v.Field(i), hasScalarTag(v.Type().Field(i)) + } + } + return reflect.Value{}, false +} + +// orderedMapValueByGraphQLName takes [][2]string, interprets it as an ordered map +// and returns value for corresponding key, or invalid reflect.Value if none found. +func orderedMapValueByGraphQLName(v reflect.Value, name string) reflect.Value { + for i := 0; i < v.Len(); i++ { + pair := v.Index(i) + key := pair.Index(0).Interface().(string) + if keyHasGraphQLName(key, name) { + return pair.Index(1) + } + } + return reflect.Value{} +} + +func hasScalarTag(f reflect.StructField) bool { + return isTrue(f.Tag.Get("scalar")) +} + +func isTrue(s string) bool { + b, _ := strconv.ParseBool(s) + return b +} + +// hasGraphQLName reports whether struct field f has GraphQL name. +func hasGraphQLName(f reflect.StructField, name string) bool { + value, ok := f.Tag.Lookup("graphql") + if !ok { + // TODO: caseconv package is relatively slow. Optimize it, then consider using it here. + //return caseconv.MixedCapsToLowerCamelCase(f.Name) == name + return strings.EqualFold(f.Name, name) + } + return keyHasGraphQLName(value, name) +} + +func keyHasGraphQLName(value, name string) bool { + value = strings.TrimSpace(value) // TODO: Parse better. + if strings.HasPrefix(value, "...") { + // GraphQL fragment. It doesn't have a name. + return false + } + if i := strings.Index(value, "("); i != -1 { + value = value[:i] + } + if i := strings.Index(value, ":"); i != -1 { + value = value[:i] + } + return strings.TrimSpace(value) == name +} + +// isGraphQLFragment reports whether struct field f is a GraphQL fragment. +func isGraphQLFragment(f reflect.StructField) bool { + value, ok := f.Tag.Lookup("graphql") + if !ok { + return false + } + return keyForGraphQLFragment(value) +} + +// isGraphQLFragment reports whether ordered map kv pair f is a GraphQL fragment. +func keyForGraphQLFragment(value string) bool { + value = strings.TrimSpace(value) // TODO: Parse better. + return strings.HasPrefix(value, "...") +} + +// unmarshalValue unmarshals JSON value into v. +// v must be addressable and not obtained by the use of unexported +// struct fields, otherwise unmarshalValue will panic. +func unmarshalValue(value interface{}, v reflect.Value) error { + b, err := json.Marshal(value) // TODO: Short-circuit (if profiling says it's worth it). 
+ if err != nil { + return err + } + ty := v.Type() + if ty.Kind() == reflect.Interface { + if !v.Elem().IsValid() { + return json.Unmarshal(b, v.Addr().Interface()) + } + ty = v.Elem().Type() + } + newVal := reflect.New(ty) + err = json.Unmarshal(b, newVal.Interface()) + if err != nil { + return err + } + v.Set(newVal.Elem()) + return nil +} diff --git a/vendor/github.com/hasura/go-graphql-client/option.go b/vendor/github.com/hasura/go-graphql-client/option.go new file mode 100644 index 000000000..88ac86503 --- /dev/null +++ b/vendor/github.com/hasura/go-graphql-client/option.go @@ -0,0 +1,38 @@ +package graphql + +// OptionType represents the logic of graphql query construction +type OptionType string + +const ( + // optionTypeOperationName is private because it's option is built-in and unique + optionTypeOperationName OptionType = "operation_name" + OptionTypeOperationDirective OptionType = "operation_directive" +) + +// Option abstracts an extra render interface for the query string +// They are optional parts. By default GraphQL queries can request data without them +type Option interface { + // Type returns the supported type of the renderer + // available types: operation_name and operation_directive + Type() OptionType + // String returns the query component string + String() string +} + +// operationNameOption represents the operation name render component +type operationNameOption struct { + name string +} + +func (ono operationNameOption) Type() OptionType { + return optionTypeOperationName +} + +func (ono operationNameOption) String() string { + return ono.name +} + +// OperationName creates the operation name option +func OperationName(name string) Option { + return operationNameOption{name} +} diff --git a/vendor/github.com/hasura/go-graphql-client/query.go b/vendor/github.com/hasura/go-graphql-client/query.go new file mode 100644 index 000000000..84d60b846 --- /dev/null +++ b/vendor/github.com/hasura/go-graphql-client/query.go @@ -0,0 +1,253 @@ +package graphql + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/hasura/go-graphql-client/ident" +) + +type constructOptionsOutput struct { + operationName string + operationDirectives []string +} + +func (coo constructOptionsOutput) OperationDirectivesString() string { + operationDirectivesStr := strings.Join(coo.operationDirectives, " ") + if operationDirectivesStr != "" { + return fmt.Sprintf(" %s ", operationDirectivesStr) + } + return "" +} + +func constructOptions(options []Option) (*constructOptionsOutput, error) { + output := &constructOptionsOutput{} + + for _, option := range options { + switch option.Type() { + case optionTypeOperationName: + output.operationName = option.String() + case OptionTypeOperationDirective: + output.operationDirectives = append(output.operationDirectives, option.String()) + default: + return nil, fmt.Errorf("invalid query option type: %s", option.Type()) + } + } + + return output, nil +} + +func constructQuery(v interface{}, variables map[string]interface{}, options ...Option) (string, error) { + query := query(v) + + optionsOutput, err := constructOptions(options) + if err != nil { + return "", err + } + + if len(variables) > 0 { + return fmt.Sprintf("query %s(%s)%s%s", optionsOutput.operationName, queryArguments(variables), optionsOutput.OperationDirectivesString(), query), nil + } + + if optionsOutput.operationName == "" && len(optionsOutput.operationDirectives) == 0 { + return query, nil + } + + return fmt.Sprintf("query 
%s%s%s", optionsOutput.operationName, optionsOutput.OperationDirectivesString(), query), nil +} + +func constructMutation(v interface{}, variables map[string]interface{}, options ...Option) (string, error) { + query := query(v) + optionsOutput, err := constructOptions(options) + if err != nil { + return "", err + } + if len(variables) > 0 { + return fmt.Sprintf("mutation %s(%s)%s%s", optionsOutput.operationName, queryArguments(variables), optionsOutput.OperationDirectivesString(), query), nil + } + + if optionsOutput.operationName == "" && len(optionsOutput.operationDirectives) == 0 { + return "mutation" + query, nil + } + + return fmt.Sprintf("mutation %s%s%s", optionsOutput.operationName, optionsOutput.OperationDirectivesString(), query), nil +} + +func constructSubscription(v interface{}, variables map[string]interface{}, options ...Option) (string, error) { + query := query(v) + optionsOutput, err := constructOptions(options) + if err != nil { + return "", err + } + if len(variables) > 0 { + return fmt.Sprintf("subscription %s(%s)%s%s", optionsOutput.operationName, queryArguments(variables), optionsOutput.OperationDirectivesString(), query), nil + } + if optionsOutput.operationName == "" && len(optionsOutput.operationDirectives) == 0 { + return "subscription" + query, nil + } + return fmt.Sprintf("subscription %s%s%s", optionsOutput.operationName, optionsOutput.OperationDirectivesString(), query), nil +} + +// queryArguments constructs a minified arguments string for variables. +// +// E.g., map[string]interface{}{"a": Int(123), "b": NewBoolean(true)} -> "$a:Int!$b:Boolean". +func queryArguments(variables map[string]interface{}) string { + // Sort keys in order to produce deterministic output for testing purposes. + // TODO: If tests can be made to work with non-deterministic output, then no need to sort. + keys := make([]string, 0, len(variables)) + for k := range variables { + keys = append(keys, k) + } + sort.Strings(keys) + + var buf bytes.Buffer + for _, k := range keys { + io.WriteString(&buf, "$") + io.WriteString(&buf, k) + io.WriteString(&buf, ":") + writeArgumentType(&buf, reflect.TypeOf(variables[k]), true) + // Don't insert a comma here. + // Commas in GraphQL are insignificant, and we want minified output. + // See https://facebook.github.io/graphql/October2016/#sec-Insignificant-Commas. + } + return buf.String() +} + +// writeArgumentType writes a minified GraphQL type for t to w. +// value indicates whether t is a value (required) type or pointer (optional) type. +// If value is true, then "!" is written at the end of t. +func writeArgumentType(w io.Writer, t reflect.Type, value bool) { + if t.Kind() == reflect.Ptr { + // Pointer is an optional type, so no "!" at the end of the pointer's underlying type. + writeArgumentType(w, t.Elem(), false) + return + } + + switch t.Kind() { + case reflect.Slice, reflect.Array: + // List. E.g., "[Int]". + io.WriteString(w, "[") + writeArgumentType(w, t.Elem(), true) + io.WriteString(w, "]") + default: + // Named type. E.g., "Int". + name := t.Name() + if name == "string" { // HACK: Workaround for https://github.com/shurcooL/githubv4/issues/12. + name = "ID" + } + io.WriteString(w, name) + } + + if value { + // Value is a required type, so add "!" to the end. + io.WriteString(w, "!") + } +} + +// query uses writeQuery to recursively construct +// a minified query string from the provided struct v. +// +// E.g., struct{Foo Int, BarBaz *Boolean} -> "{foo,barBaz}". 
+func query(v interface{}) string { + var buf bytes.Buffer + writeQuery(&buf, reflect.TypeOf(v), reflect.ValueOf(v), false) + return buf.String() +} + +// writeQuery writes a minified query for t to w. +// If inline is true, the struct fields of t are inlined into parent struct. +func writeQuery(w io.Writer, t reflect.Type, v reflect.Value, inline bool) { + switch t.Kind() { + case reflect.Ptr: + writeQuery(w, t.Elem(), ElemSafe(v), false) + case reflect.Struct: + // If the type implements json.Unmarshaler, it's a scalar. Don't expand it. + if reflect.PtrTo(t).Implements(jsonUnmarshaler) { + return + } + if !inline { + io.WriteString(w, "{") + } + for i := 0; i < t.NumField(); i++ { + if i != 0 { + io.WriteString(w, ",") + } + f := t.Field(i) + value, ok := f.Tag.Lookup("graphql") + inlineField := f.Anonymous && !ok + if !inlineField { + if ok { + io.WriteString(w, value) + } else { + io.WriteString(w, ident.ParseMixedCaps(f.Name).ToLowerCamelCase()) + } + } + // Skip writeQuery if the GraphQL type associated with the filed is scalar + if isTrue(f.Tag.Get("scalar")) { + continue + } + writeQuery(w, f.Type, FieldSafe(v, i), inlineField) + } + if !inline { + io.WriteString(w, "}") + } + case reflect.Slice: + if t.Elem().Kind() != reflect.Array { + writeQuery(w, t.Elem(), IndexSafe(v, 0), false) + return + } + // handle [][2]interface{} like an ordered map + if t.Elem().Len() != 2 { + err := fmt.Errorf("only arrays of len 2 are supported, got %v", t.Elem()) + panic(err.Error()) + } + sliceOfPairs := v + _, _ = io.WriteString(w, "{") + for i := 0; i < sliceOfPairs.Len(); i++ { + pair := sliceOfPairs.Index(i) + // it.Value() returns interface{}, so we need to use reflect.ValueOf + // to cast it away + key, val := pair.Index(0), reflect.ValueOf(pair.Index(1).Interface()) + _, _ = io.WriteString(w, key.Interface().(string)) + writeQuery(w, val.Type(), val, false) + } + _, _ = io.WriteString(w, "}") + case reflect.Map: + err := fmt.Errorf("type %v is not supported, use [][2]interface{} instead", t) + panic(err.Error()) + } +} + +func IndexSafe(v reflect.Value, i int) reflect.Value { + if v.IsValid() && i < v.Len() { + return v.Index(i) + } + return reflect.ValueOf(nil) +} + +func ElemSafe(v reflect.Value) reflect.Value { + if v.IsValid() { + return v.Elem() + } + return reflect.ValueOf(nil) +} + +func FieldSafe(valStruct reflect.Value, i int) reflect.Value { + if valStruct.IsValid() { + return valStruct.Field(i) + } + return reflect.ValueOf(nil) +} + +var jsonUnmarshaler = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + +func isTrue(s string) bool { + b, _ := strconv.ParseBool(s) + return b +} diff --git a/vendor/github.com/hasura/go-graphql-client/scalar.go b/vendor/github.com/hasura/go-graphql-client/scalar.go new file mode 100644 index 000000000..0f7ceea5e --- /dev/null +++ b/vendor/github.com/hasura/go-graphql-client/scalar.go @@ -0,0 +1,51 @@ +package graphql + +// Note: These custom types are meant to be used in queries for now. +// But the plan is to switch to using native Go types (string, int, bool, time.Time, etc.). +// See https://github.com/shurcooL/githubv4/issues/9 for details. +// +// These custom types currently provide documentation, and their use +// is required for sending outbound queries. However, native Go types +// can be used for unmarshaling. Once https://github.com/shurcooL/githubv4/issues/9 +// is resolved, native Go types can completely replace these. + +type ( + // Boolean represents true or false values. 
+	Boolean bool
+
+	// Float represents signed double-precision fractional values as
+	// specified by IEEE 754.
+	Float float64
+
+	// ID represents a unique identifier that is Base64 obfuscated. It
+	// is often used to refetch an object or as key for a cache. The ID
+	// type appears in a JSON response as a String; however, it is not
+	// intended to be human-readable. When expected as an input type,
+	// any string (such as "VXNlci0xMA==") or integer (such as 4) input
+	// value will be accepted as an ID.
+	ID interface{}
+
+	// Int represents non-fractional signed whole numeric values.
+	// Int can represent values between -(2^31) and 2^31 - 1.
+	Int int32
+
+	// String represents textual data as UTF-8 character sequences.
+	// This type is most often used by GraphQL to represent free-form
+	// human-readable text.
+	String string
+)
+
+// NewBoolean is a helper to make a new *Boolean.
+func NewBoolean(v Boolean) *Boolean { return &v }
+
+// NewFloat is a helper to make a new *Float.
+func NewFloat(v Float) *Float { return &v }
+
+// NewID is a helper to make a new *ID.
+func NewID(v ID) *ID { return &v }
+
+// NewInt is a helper to make a new *Int.
+func NewInt(v Int) *Int { return &v }
+
+// NewString is a helper to make a new *String.
+func NewString(v String) *String { return &v }
diff --git a/vendor/github.com/hasura/go-graphql-client/subscription.go b/vendor/github.com/hasura/go-graphql-client/subscription.go
new file mode 100644
index 000000000..6c35e0d43
--- /dev/null
+++ b/vendor/github.com/hasura/go-graphql-client/subscription.go
@@ -0,0 +1,616 @@
+package graphql
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/google/uuid"
+	"nhooyr.io/websocket"
+	"nhooyr.io/websocket/wsjson"
+)
+
+// Subscription transport follows Apollo's subscriptions-transport-ws protocol specification
+// https://github.com/apollographql/subscriptions-transport-ws/blob/master/PROTOCOL.md
+
+// OperationMessageType identifies the type of a subscription protocol message
+type OperationMessageType string
+
+const (
+	// Client sends this message after the plain websocket connection is established to start the communication with the server
+	GQL_CONNECTION_INIT OperationMessageType = "connection_init"
+	// The server may respond with this message to GQL_CONNECTION_INIT from the client, indicating that the server rejected the connection.
+	GQL_CONNECTION_ERROR OperationMessageType = "conn_err"
+	// Client sends this message to execute a GraphQL operation
+	GQL_START OperationMessageType = "start"
+	// Client sends this message in order to stop a running GraphQL operation execution (for example: unsubscribe)
+	GQL_STOP OperationMessageType = "stop"
+	// Server sends this message upon a failing operation, before the GraphQL execution, usually due to GraphQL validation errors (resolver errors are part of the GQL_DATA message, and will be added as an errors array)
+	GQL_ERROR OperationMessageType = "error"
+	// The server sends this message to transfer the GraphQL execution result from the server to the client; this message is a response to the GQL_START message.
+	GQL_DATA OperationMessageType = "data"
+	// Server sends this message to indicate that a GraphQL operation is done, and no more data will arrive for the specific operation.
+	GQL_COMPLETE OperationMessageType = "complete"
+	// Server message that should be sent right after each GQL_CONNECTION_ACK is processed and then periodically to keep the client connection alive.
+ // The client starts to consider the keep alive message only upon the first received keep alive message from the server. + GQL_CONNECTION_KEEP_ALIVE OperationMessageType = "ka" + // The server may responses with this message to the GQL_CONNECTION_INIT from client, indicates the server accepted the connection. May optionally include a payload. + GQL_CONNECTION_ACK OperationMessageType = "connection_ack" + // Client sends this message to terminate the connection. + GQL_CONNECTION_TERMINATE OperationMessageType = "connection_terminate" + // Unknown operation type, for logging only + GQL_UNKNOWN OperationMessageType = "unknown" + // Internal status, for logging only + GQL_INTERNAL OperationMessageType = "internal" +) + +type OperationMessage struct { + ID string `json:"id,omitempty"` + Type OperationMessageType `json:"type"` + Payload json.RawMessage `json:"payload,omitempty"` +} + +func (om OperationMessage) String() string { + bs, _ := json.Marshal(om) + + return string(bs) +} + +// WebsocketHandler abstracts WebSocket connection functions +// ReadJSON and WriteJSON data of a frame from the WebSocket connection. +// Close the WebSocket connection. +type WebsocketConn interface { + ReadJSON(v interface{}) error + WriteJSON(v interface{}) error + Close() error + // SetReadLimit sets the maximum size in bytes for a message read from the peer. If a + // message exceeds the limit, the connection sends a close message to the peer + // and returns ErrReadLimit to the application. + SetReadLimit(limit int64) +} + +type handlerFunc func(data *json.RawMessage, err error) error +type subscription struct { + query string + variables map[string]interface{} + handler func(data *json.RawMessage, err error) + started Boolean +} + +// SubscriptionClient is a GraphQL subscription client. +type SubscriptionClient struct { + url string + conn WebsocketConn + connectionParams map[string]interface{} + context context.Context + subscriptions map[string]*subscription + cancel context.CancelFunc + subscribersMu sync.Mutex + timeout time.Duration + isRunning int64 + readLimit int64 // max size of response message. 
Default 10 MB
+	log              func(args ...interface{})
+	createConn       func(sc *SubscriptionClient) (WebsocketConn, error)
+	retryTimeout     time.Duration
+	onConnected      func()
+	onDisconnected   func()
+	onError          func(sc *SubscriptionClient, err error) error
+	errorChan        chan error
+	disabledLogTypes []OperationMessageType
+}
+
+func NewSubscriptionClient(url string) *SubscriptionClient {
+	return &SubscriptionClient{
+		url:           url,
+		timeout:       time.Minute,
+		readLimit:     10 * 1024 * 1024, // set default limit 10MB
+		subscriptions: make(map[string]*subscription),
+		createConn:    newWebsocketConn,
+		retryTimeout:  time.Minute,
+		errorChan:     make(chan error),
+	}
+}
+
+// GetURL returns the GraphQL server's URL
+func (sc *SubscriptionClient) GetURL() string {
+	return sc.url
+}
+
+// GetContext returns the current context of the subscription client
+func (sc *SubscriptionClient) GetContext() context.Context {
+	return sc.context
+}
+
+// GetTimeout returns the write timeout of the websocket client
+func (sc *SubscriptionClient) GetTimeout() time.Duration {
+	return sc.timeout
+}
+
+// WithWebSocket replaces the websocket client constructor with a custom one.
+// By default, the subscription client uses https://github.com/nhooyr/websocket
+func (sc *SubscriptionClient) WithWebSocket(fn func(sc *SubscriptionClient) (WebsocketConn, error)) *SubscriptionClient {
+	sc.createConn = fn
+	return sc
+}
+
+// WithConnectionParams updates the connection params sent to the server through the GQL_CONNECTION_INIT event.
+// It is usually used for the authentication handshake
+func (sc *SubscriptionClient) WithConnectionParams(params map[string]interface{}) *SubscriptionClient {
+	sc.connectionParams = params
+	return sc
+}
+
+// WithTimeout updates the write timeout of the websocket client
+func (sc *SubscriptionClient) WithTimeout(timeout time.Duration) *SubscriptionClient {
+	sc.timeout = timeout
+	return sc
+}
+
+// WithRetryTimeout updates the reconnection timeout. When the websocket server is down, the client retries connecting every second until the timeout elapses
+func (sc *SubscriptionClient) WithRetryTimeout(timeout time.Duration) *SubscriptionClient {
+	sc.retryTimeout = timeout
+	return sc
+}
+
+// WithLog sets the logging function used to print received messages. By default, nothing is printed
+func (sc *SubscriptionClient) WithLog(logger func(args ...interface{})) *SubscriptionClient {
+	sc.log = logger
+	return sc
+}
+
+// WithoutLogTypes excludes the given operation types from logging
+func (sc *SubscriptionClient) WithoutLogTypes(types ...OperationMessageType) *SubscriptionClient {
+	sc.disabledLogTypes = types
+	return sc
+}
+
+// WithReadLimit sets the maximum size of a response message
+func (sc *SubscriptionClient) WithReadLimit(limit int64) *SubscriptionClient {
+	sc.readLimit = limit
+	return sc
+}
+
+// OnError event is triggered when there is any connection error.
This is bottom exception handler level +// If this function is empty, or returns nil, the error is ignored +// If returns error, the websocket connection will be terminated +func (sc *SubscriptionClient) OnError(onError func(sc *SubscriptionClient, err error) error) *SubscriptionClient { + sc.onError = onError + return sc +} + +// OnConnected event is triggered when the websocket connected to GraphQL server successfully +func (sc *SubscriptionClient) OnConnected(fn func()) *SubscriptionClient { + sc.onConnected = fn + return sc +} + +// OnDisconnected event is triggered when the websocket server was still down after retry timeout +func (sc *SubscriptionClient) OnDisconnected(fn func()) *SubscriptionClient { + sc.onDisconnected = fn + return sc +} + +func (sc *SubscriptionClient) setIsRunning(value Boolean) { + if value { + atomic.StoreInt64(&sc.isRunning, 1) + } else { + atomic.StoreInt64(&sc.isRunning, 0) + } +} + +func (sc *SubscriptionClient) init() error { + + now := time.Now() + ctx, cancel := context.WithCancel(context.Background()) + sc.context = ctx + sc.cancel = cancel + + for { + var err error + var conn WebsocketConn + // allow custom websocket client + if sc.conn == nil { + conn, err = sc.createConn(sc) + if err == nil { + sc.conn = conn + } + } + + if err == nil { + sc.conn.SetReadLimit(sc.readLimit) + // send connection init event to the server + err = sc.sendConnectionInit() + } + + if err == nil { + return nil + } + + if now.Add(sc.retryTimeout).Before(time.Now()) { + if sc.onDisconnected != nil { + sc.onDisconnected() + } + return err + } + sc.printLog(err.Error()+". retry in second....", GQL_INTERNAL) + time.Sleep(time.Second) + } +} + +func (sc *SubscriptionClient) printLog(message interface{}, opType OperationMessageType) { + if sc.log == nil { + return + } + for _, ty := range sc.disabledLogTypes { + if ty == opType { + return + } + } + + sc.log(message) +} + +func (sc *SubscriptionClient) sendConnectionInit() (err error) { + var bParams []byte = nil + if sc.connectionParams != nil { + + bParams, err = json.Marshal(sc.connectionParams) + if err != nil { + return + } + } + + // send connection_init event to the server + msg := OperationMessage{ + Type: GQL_CONNECTION_INIT, + Payload: bParams, + } + + sc.printLog(msg, GQL_CONNECTION_INIT) + return sc.conn.WriteJSON(msg) +} + +// Subscribe sends start message to server and open a channel to receive data. +// The handler callback function will receive raw message data or error. If the call return error, onError event will be triggered +// The function returns subscription ID and error. You can use subscription ID to unsubscribe the subscription +func (sc *SubscriptionClient) Subscribe(v interface{}, variables map[string]interface{}, handler func(message *json.RawMessage, err error) error, options ...Option) (string, error) { + return sc.do(v, variables, handler, options...) +} + +// NamedSubscribe sends start message to server and open a channel to receive data, with operation name +// +// Deprecated: this is the shortcut of Subscribe method, with NewOperationName option +func (sc *SubscriptionClient) NamedSubscribe(name string, v interface{}, variables map[string]interface{}, handler func(message *json.RawMessage, err error) error, options ...Option) (string, error) { + return sc.do(v, variables, handler, append(options, OperationName(name))...) 
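A sketch of how Subscribe, Run, and Close are typically combined on the consumer side; the endpoint, schema, connection params, and handler body are illustrative assumptions, not part of this change:

```go
package main

import (
	"encoding/json"
	"log"

	graphql "github.com/hasura/go-graphql-client"
)

func main() {
	client := graphql.NewSubscriptionClient("wss://example.com/graphql").
		WithConnectionParams(map[string]interface{}{
			"headers": map[string]string{"authorization": "Bearer <token>"},
		})
	defer client.Close()

	// The struct shape drives the subscription document: "{message{content}}".
	var sub struct {
		Message struct {
			Content graphql.String
		} `graphql:"message"`
	}

	// The returned ID can later be passed to Unsubscribe.
	_, err := client.Subscribe(&sub, nil, func(data *json.RawMessage, err error) error {
		if err != nil {
			return err
		}
		if data != nil {
			log.Println(string(*data)) // raw payload for each event
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}

	// Run blocks while the websocket is open; it is usually started in a goroutine.
	if err := client.Run(); err != nil {
		log.Fatal(err)
	}
}
```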
+}
+
+// SubscribeRaw sends a start message to the server and opens a channel to receive data, using a raw query string
+func (sc *SubscriptionClient) SubscribeRaw(query string, variables map[string]interface{}, handler func(message *json.RawMessage, err error) error) (string, error) {
+	return sc.doRaw(query, variables, handler)
+}
+
+func (sc *SubscriptionClient) do(v interface{}, variables map[string]interface{}, handler func(message *json.RawMessage, err error) error, options ...Option) (string, error) {
+	query, err := constructSubscription(v, variables, options...)
+	if err != nil {
+		return "", err
+	}
+
+	return sc.doRaw(query, variables, handler)
+}
+
+func (sc *SubscriptionClient) doRaw(query string, variables map[string]interface{}, handler func(message *json.RawMessage, err error) error) (string, error) {
+	id := uuid.New().String()
+
+	sub := subscription{
+		query:     query,
+		variables: variables,
+		handler:   sc.wrapHandler(handler),
+	}
+
+	// if the websocket client is running, start the subscription immediately
+	if atomic.LoadInt64(&sc.isRunning) > 0 {
+		if err := sc.startSubscription(id, &sub); err != nil {
+			return "", err
+		}
+	}
+
+	sc.subscribersMu.Lock()
+	sc.subscriptions[id] = &sub
+	sc.subscribersMu.Unlock()
+
+	return id, nil
+}
+
+// startSubscription sends a start message for the given subscription to the server
+func (sc *SubscriptionClient) startSubscription(id string, sub *subscription) error {
+	if sub == nil || sub.started {
+		return nil
+	}
+
+	in := struct {
+		Query     string                 `json:"query"`
+		Variables map[string]interface{} `json:"variables,omitempty"`
+	}{
+		Query:     sub.query,
+		Variables: sub.variables,
+	}
+
+	payload, err := json.Marshal(in)
+	if err != nil {
+		return err
+	}
+
+	// send start message to the server
+	msg := OperationMessage{
+		ID:      id,
+		Type:    GQL_START,
+		Payload: payload,
+	}
+
+	sc.printLog(msg, GQL_START)
+	if err := sc.conn.WriteJSON(msg); err != nil {
+		return err
+	}
+
+	sub.started = true
+	return nil
+}
+
+func (sc *SubscriptionClient) wrapHandler(fn handlerFunc) func(data *json.RawMessage, err error) {
+	return func(data *json.RawMessage, err error) {
+		if errValue := fn(data, err); errValue != nil {
+			sc.errorChan <- errValue
+		}
+	}
+}
+
+// Run starts the websocket client and subscriptions. If this function is run in a goroutine, it can be stopped by calling Close
+func (sc *SubscriptionClient) Run() error {
+	if err := sc.init(); err != nil {
+		return fmt.Errorf("retry timeout. exiting...")
+	}
+
+	// lazily start subscriptions
+	sc.subscribersMu.Lock()
+	for k, v := range sc.subscriptions {
+		if err := sc.startSubscription(k, v); err != nil {
+			sc.Unsubscribe(k)
+			return err
+		}
+	}
+	sc.subscribersMu.Unlock()
+
+	sc.setIsRunning(true)
+
+	for atomic.LoadInt64(&sc.isRunning) > 0 {
+		select {
+		case <-sc.context.Done():
+			return nil
+		case e := <-sc.errorChan:
+			if sc.onError != nil {
+				if err := sc.onError(sc, e); err != nil {
+					return err
+				}
+			}
+		default:
+
+			var message OperationMessage
+			if err := sc.conn.ReadJSON(&message); err != nil {
+				// manual EOF check
+				if err == io.EOF || strings.Contains(err.Error(), "EOF") {
+					return sc.Reset()
+				}
+				closeStatus := websocket.CloseStatus(err)
+				if closeStatus == websocket.StatusNormalClosure {
+					// close event from websocket client, exiting...
+					return nil
+				}
+				if closeStatus != -1 {
+					sc.printLog(fmt.Sprintf("%s. Retry connecting...", err), GQL_INTERNAL)
+					return sc.Reset()
+				}
+
+				if sc.onError != nil {
+					if err = sc.onError(sc, err); err != nil {
+						return err
+					}
+				}
+				continue
+			}
+
+			switch message.Type {
+			case GQL_ERROR:
+				sc.printLog(message, GQL_ERROR)
+				fallthrough
+			case GQL_DATA:
+				sc.printLog(message, GQL_DATA)
+				id, err := uuid.Parse(message.ID)
+				if err != nil {
+					continue
+				}
+
+				sc.subscribersMu.Lock()
+				sub, ok := sc.subscriptions[id.String()]
+				sc.subscribersMu.Unlock()
+
+				if !ok {
+					continue
+				}
+				var out struct {
+					Data   *json.RawMessage
+					Errors Errors
+				}
+
+				err = json.Unmarshal(message.Payload, &out)
+				if err != nil {
+					go sub.handler(nil, err)
+					continue
+				}
+				if len(out.Errors) > 0 {
+					go sub.handler(nil, out.Errors)
+					continue
+				}
+
+				go sub.handler(out.Data, nil)
+			case GQL_CONNECTION_ERROR:
+				sc.printLog(message, GQL_CONNECTION_ERROR)
+			case GQL_COMPLETE:
+				sc.printLog(message, GQL_COMPLETE)
+				sc.Unsubscribe(message.ID)
+			case GQL_CONNECTION_KEEP_ALIVE:
+				sc.printLog(message, GQL_CONNECTION_KEEP_ALIVE)
+			case GQL_CONNECTION_ACK:
+				sc.printLog(message, GQL_CONNECTION_ACK)
+				if sc.onConnected != nil {
+					sc.onConnected()
+				}
+			default:
+				sc.printLog(message, GQL_UNKNOWN)
+			}
+		}
+	}
+
+	// if the running status is false, stop retrying
+	if atomic.LoadInt64(&sc.isRunning) == 0 {
+		return nil
+	}
+
+	return sc.Reset()
+}
+
+// Unsubscribe sends a stop message to the server and closes the subscription channel.
+// The input parameter is the subscription ID returned from the Subscribe function
+func (sc *SubscriptionClient) Unsubscribe(id string) error {
+	sc.subscribersMu.Lock()
+	defer sc.subscribersMu.Unlock()
+
+	_, ok := sc.subscriptions[id]
+	if !ok {
+		return fmt.Errorf("subscription id %s does not exist", id)
+	}
+
+	delete(sc.subscriptions, id)
+	return sc.stopSubscription(id)
+}
+
+func (sc *SubscriptionClient) stopSubscription(id string) error {
+	if sc.conn != nil {
+		// send stop message to the server
+		msg := OperationMessage{
+			ID:   id,
+			Type: GQL_STOP,
+		}
+
+		sc.printLog(msg, GQL_STOP)
+		if err := sc.conn.WriteJSON(msg); err != nil {
+			return err
+		}
+
+	}
+
+	return nil
+}
+
+func (sc *SubscriptionClient) terminate() error {
+	if sc.conn != nil {
+		// send terminate message to the server
+		msg := OperationMessage{
+			Type: GQL_CONNECTION_TERMINATE,
+		}
+
+		sc.printLog(msg, GQL_CONNECTION_TERMINATE)
+		return sc.conn.WriteJSON(msg)
+	}
+
+	return nil
+}
+
+// Reset restarts the websocket connection and subscriptions
+func (sc *SubscriptionClient) Reset() error {
+	if atomic.LoadInt64(&sc.isRunning) == 0 {
+		return nil
+	}
+
+	sc.subscribersMu.Lock()
+	for id, sub := range sc.subscriptions {
+		_ = sc.stopSubscription(id)
+		sub.started = false
+	}
+	sc.subscribersMu.Unlock()
+
+	if sc.conn != nil {
+		_ = sc.terminate()
+		_ = sc.conn.Close()
+		sc.conn = nil
+	}
+	sc.cancel()
+
+	return sc.Run()
+}
+
+// Close closes all subscription channels and the websocket connection
+func (sc *SubscriptionClient) Close() (err error) {
+	sc.setIsRunning(false)
+
+	for id := range sc.subscriptions {
+		if err = sc.Unsubscribe(id); err != nil {
+			sc.cancel()
+			return err
+		}
+	}
+
+	if sc.conn != nil {
+		_ = sc.terminate()
+		err = sc.conn.Close()
+		sc.conn = nil
+	}
+	sc.cancel()
+
+	return
+}
+
+// WebsocketHandler is the default websocket handler implementation, using https://github.com/nhooyr/websocket
+type WebsocketHandler struct {
+	ctx     context.Context
+	timeout time.Duration
+	*websocket.Conn
+}
+
+func (wh *WebsocketHandler) WriteJSON(v interface{}) error {
+	ctx, cancel := context.WithTimeout(wh.ctx, wh.timeout) +
defer cancel() + + return wsjson.Write(ctx, wh.Conn, v) +} + +func (wh *WebsocketHandler) ReadJSON(v interface{}) error { + ctx, cancel := context.WithTimeout(wh.ctx, wh.timeout) + defer cancel() + return wsjson.Read(ctx, wh.Conn, v) +} + +func (wh *WebsocketHandler) Close() error { + return wh.Conn.Close(websocket.StatusNormalClosure, "close websocket") +} + +func newWebsocketConn(sc *SubscriptionClient) (WebsocketConn, error) { + + options := &websocket.DialOptions{ + Subprotocols: []string{"graphql-ws"}, + } + c, _, err := websocket.Dial(sc.GetContext(), sc.GetURL(), options) + if err != nil { + return nil, err + } + + return &WebsocketHandler{ + ctx: sc.GetContext(), + Conn: c, + timeout: sc.GetTimeout(), + }, nil +} diff --git a/vendor/github.com/json-iterator/go/.codecov.yml b/vendor/github.com/json-iterator/go/.codecov.yml new file mode 100644 index 000000000..955dc0be5 --- /dev/null +++ b/vendor/github.com/json-iterator/go/.codecov.yml @@ -0,0 +1,3 @@ +ignore: + - "output_tests/.*" + diff --git a/vendor/github.com/json-iterator/go/.gitignore b/vendor/github.com/json-iterator/go/.gitignore new file mode 100644 index 000000000..15556530a --- /dev/null +++ b/vendor/github.com/json-iterator/go/.gitignore @@ -0,0 +1,4 @@ +/vendor +/bug_test.go +/coverage.txt +/.idea diff --git a/vendor/github.com/json-iterator/go/.travis.yml b/vendor/github.com/json-iterator/go/.travis.yml new file mode 100644 index 000000000..449e67cd0 --- /dev/null +++ b/vendor/github.com/json-iterator/go/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.8.x + - 1.x + +before_install: + - go get -t -v ./... + +script: + - ./test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/json-iterator/go/Gopkg.lock b/vendor/github.com/json-iterator/go/Gopkg.lock new file mode 100644 index 000000000..c8a9fbb38 --- /dev/null +++ b/vendor/github.com/json-iterator/go/Gopkg.lock @@ -0,0 +1,21 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "github.com/modern-go/concurrent" + packages = ["."] + revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a" + version = "1.0.0" + +[[projects]] + name = "github.com/modern-go/reflect2" + packages = ["."] + revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" + version = "1.0.1" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/json-iterator/go/Gopkg.toml b/vendor/github.com/json-iterator/go/Gopkg.toml new file mode 100644 index 000000000..313a0f887 --- /dev/null +++ b/vendor/github.com/json-iterator/go/Gopkg.toml @@ -0,0 +1,26 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + +ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"] + +[[constraint]] + name = "github.com/modern-go/reflect2" + version = "1.0.1" diff --git a/vendor/github.com/json-iterator/go/LICENSE b/vendor/github.com/json-iterator/go/LICENSE new file mode 100644 index 000000000..2cf4f5ab2 --- /dev/null +++ b/vendor/github.com/json-iterator/go/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 json-iterator + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md new file mode 100644 index 000000000..52b111d5f --- /dev/null +++ b/vendor/github.com/json-iterator/go/README.md @@ -0,0 +1,87 @@ +[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge) +[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/json-iterator/go) +[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go) +[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go) +[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go) +[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE) +[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) + +A high-performance 100% compatible drop-in replacement of "encoding/json" + +You can also use thrift like JSON using [thrift-iterator](https://github.com/thrift-iterator/go) + +# Benchmark + +![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png) + +Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go + +Raw Result (easyjson requires static code generation) + +| | ns/op | allocation bytes | allocation times | +| --------------- | ----------- | ---------------- | ---------------- | +| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op | +| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op | +| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op | +| std encode | 2213 ns/op | 712 B/op | 5 allocs/op | +| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op | +| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op | + +Always benchmark with your own workload. +The result depends heavily on the data input. + +# Usage + +100% compatibility with standard lib + +Replace + +```go +import "encoding/json" +json.Marshal(&data) +``` + +with + +```go +import jsoniter "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary +json.Marshal(&data) +``` + +Replace + +```go +import "encoding/json" +json.Unmarshal(input, &data) +``` + +with + +```go +import jsoniter "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary +json.Unmarshal(input, &data) +``` + +[More documentation](http://jsoniter.com/migrate-from-go-std.html) + +# How to get + +``` +go get github.com/json-iterator/go +``` + +# Contribution Welcomed ! 
+ +Contributors + +- [thockin](https://github.com/thockin) +- [mattn](https://github.com/mattn) +- [cch123](https://github.com/cch123) +- [Oleg Shaldybin](https://github.com/olegshaldybin) +- [Jason Toffaletti](https://github.com/toffaletti) + +Report issue or pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) diff --git a/vendor/github.com/json-iterator/go/adapter.go b/vendor/github.com/json-iterator/go/adapter.go new file mode 100644 index 000000000..92d2cc4a3 --- /dev/null +++ b/vendor/github.com/json-iterator/go/adapter.go @@ -0,0 +1,150 @@ +package jsoniter + +import ( + "bytes" + "io" +) + +// RawMessage to make replace json with jsoniter +type RawMessage []byte + +// Unmarshal adapts to json/encoding Unmarshal API +// +// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v. +// Refer to https://godoc.org/encoding/json#Unmarshal for more information +func Unmarshal(data []byte, v interface{}) error { + return ConfigDefault.Unmarshal(data, v) +} + +// UnmarshalFromString is a convenient method to read from string instead of []byte +func UnmarshalFromString(str string, v interface{}) error { + return ConfigDefault.UnmarshalFromString(str, v) +} + +// Get quick method to get value from deeply nested JSON structure +func Get(data []byte, path ...interface{}) Any { + return ConfigDefault.Get(data, path...) +} + +// Marshal adapts to json/encoding Marshal API +// +// Marshal returns the JSON encoding of v, adapts to json/encoding Marshal API +// Refer to https://godoc.org/encoding/json#Marshal for more information +func Marshal(v interface{}) ([]byte, error) { + return ConfigDefault.Marshal(v) +} + +// MarshalIndent same as json.MarshalIndent. Prefix is not supported. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + return ConfigDefault.MarshalIndent(v, prefix, indent) +} + +// MarshalToString convenient method to write as string instead of []byte +func MarshalToString(v interface{}) (string, error) { + return ConfigDefault.MarshalToString(v) +} + +// NewDecoder adapts to json/stream NewDecoder API. +// +// NewDecoder returns a new decoder that reads from r. +// +// Instead of a json/encoding Decoder, an Decoder is returned +// Refer to https://godoc.org/encoding/json#NewDecoder for more information +func NewDecoder(reader io.Reader) *Decoder { + return ConfigDefault.NewDecoder(reader) +} + +// Decoder reads and decodes JSON values from an input stream. +// Decoder provides identical APIs with json/stream Decoder (Token() and UseNumber() are in progress) +type Decoder struct { + iter *Iterator +} + +// Decode decode JSON into interface{} +func (adapter *Decoder) Decode(obj interface{}) error { + if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil { + if !adapter.iter.loadMore() { + return io.EOF + } + } + adapter.iter.ReadVal(obj) + err := adapter.iter.Error + if err == io.EOF { + return nil + } + return adapter.iter.Error +} + +// More is there more? 
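// (Illustrative sketch, not from the upstream sources.) More reports whether
// another value is available before the enclosing array or object ends, so a
// stream of whitespace-separated JSON values can be drained like this:
//
//	dec := jsoniter.NewDecoder(reader)
//	for dec.More() {
//		var v map[string]interface{}
//		if err := dec.Decode(&v); err != nil {
//			break
//		}
//	}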
+func (adapter *Decoder) More() bool { + iter := adapter.iter + if iter.Error != nil { + return false + } + c := iter.nextToken() + if c == 0 { + return false + } + iter.unreadByte() + return c != ']' && c != '}' +} + +// Buffered remaining buffer +func (adapter *Decoder) Buffered() io.Reader { + remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail] + return bytes.NewReader(remaining) +} + +// UseNumber causes the Decoder to unmarshal a number into an interface{} as a +// Number instead of as a float64. +func (adapter *Decoder) UseNumber() { + cfg := adapter.iter.cfg.configBeforeFrozen + cfg.UseNumber = true + adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions) +} + +// DisallowUnknownFields causes the Decoder to return an error when the destination +// is a struct and the input contains object keys which do not match any +// non-ignored, exported fields in the destination. +func (adapter *Decoder) DisallowUnknownFields() { + cfg := adapter.iter.cfg.configBeforeFrozen + cfg.DisallowUnknownFields = true + adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions) +} + +// NewEncoder same as json.NewEncoder +func NewEncoder(writer io.Writer) *Encoder { + return ConfigDefault.NewEncoder(writer) +} + +// Encoder same as json.Encoder +type Encoder struct { + stream *Stream +} + +// Encode encode interface{} as JSON to io.Writer +func (adapter *Encoder) Encode(val interface{}) error { + adapter.stream.WriteVal(val) + adapter.stream.WriteRaw("\n") + adapter.stream.Flush() + return adapter.stream.Error +} + +// SetIndent set the indention. Prefix is not supported +func (adapter *Encoder) SetIndent(prefix, indent string) { + config := adapter.stream.cfg.configBeforeFrozen + config.IndentionStep = len(indent) + adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions) +} + +// SetEscapeHTML escape html by default, set to false to disable +func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) { + config := adapter.stream.cfg.configBeforeFrozen + config.EscapeHTML = escapeHTML + adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions) +} + +// Valid reports whether data is a valid JSON encoding. +func Valid(data []byte) bool { + return ConfigDefault.Valid(data) +} diff --git a/vendor/github.com/json-iterator/go/any.go b/vendor/github.com/json-iterator/go/any.go new file mode 100644 index 000000000..f6b8aeab0 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any.go @@ -0,0 +1,325 @@ +package jsoniter + +import ( + "errors" + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "strconv" + "unsafe" +) + +// Any generic object representation. +// The lazy json implementation holds []byte and parse lazily. 
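//
// A minimal usage sketch (not from the upstream sources), assuming input such
// as {"users":[{"name":"alice"}]}: the value is parsed only when a typed
// accessor is invoked.
//
//	data := []byte(`{"users":[{"name":"alice"}]}`)
//	name := jsoniter.Get(data, "users", 0, "name").ToString() // "alice"
//	size := jsoniter.Get(data, "users").Size()                // 1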
+type Any interface { + LastError() error + ValueType() ValueType + MustBeValid() Any + ToBool() bool + ToInt() int + ToInt32() int32 + ToInt64() int64 + ToUint() uint + ToUint32() uint32 + ToUint64() uint64 + ToFloat32() float32 + ToFloat64() float64 + ToString() string + ToVal(val interface{}) + Get(path ...interface{}) Any + Size() int + Keys() []string + GetInterface() interface{} + WriteTo(stream *Stream) +} + +type baseAny struct{} + +func (any *baseAny) Get(path ...interface{}) Any { + return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} +} + +func (any *baseAny) Size() int { + return 0 +} + +func (any *baseAny) Keys() []string { + return []string{} +} + +func (any *baseAny) ToVal(obj interface{}) { + panic("not implemented") +} + +// WrapInt32 turn int32 into Any interface +func WrapInt32(val int32) Any { + return &int32Any{baseAny{}, val} +} + +// WrapInt64 turn int64 into Any interface +func WrapInt64(val int64) Any { + return &int64Any{baseAny{}, val} +} + +// WrapUint32 turn uint32 into Any interface +func WrapUint32(val uint32) Any { + return &uint32Any{baseAny{}, val} +} + +// WrapUint64 turn uint64 into Any interface +func WrapUint64(val uint64) Any { + return &uint64Any{baseAny{}, val} +} + +// WrapFloat64 turn float64 into Any interface +func WrapFloat64(val float64) Any { + return &floatAny{baseAny{}, val} +} + +// WrapString turn string into Any interface +func WrapString(val string) Any { + return &stringAny{baseAny{}, val} +} + +// Wrap turn a go object into Any interface +func Wrap(val interface{}) Any { + if val == nil { + return &nilAny{} + } + asAny, isAny := val.(Any) + if isAny { + return asAny + } + typ := reflect2.TypeOf(val) + switch typ.Kind() { + case reflect.Slice: + return wrapArray(val) + case reflect.Struct: + return wrapStruct(val) + case reflect.Map: + return wrapMap(val) + case reflect.String: + return WrapString(val.(string)) + case reflect.Int: + if strconv.IntSize == 32 { + return WrapInt32(int32(val.(int))) + } + return WrapInt64(int64(val.(int))) + case reflect.Int8: + return WrapInt32(int32(val.(int8))) + case reflect.Int16: + return WrapInt32(int32(val.(int16))) + case reflect.Int32: + return WrapInt32(val.(int32)) + case reflect.Int64: + return WrapInt64(val.(int64)) + case reflect.Uint: + if strconv.IntSize == 32 { + return WrapUint32(uint32(val.(uint))) + } + return WrapUint64(uint64(val.(uint))) + case reflect.Uintptr: + if ptrSize == 32 { + return WrapUint32(uint32(val.(uintptr))) + } + return WrapUint64(uint64(val.(uintptr))) + case reflect.Uint8: + return WrapUint32(uint32(val.(uint8))) + case reflect.Uint16: + return WrapUint32(uint32(val.(uint16))) + case reflect.Uint32: + return WrapUint32(uint32(val.(uint32))) + case reflect.Uint64: + return WrapUint64(val.(uint64)) + case reflect.Float32: + return WrapFloat64(float64(val.(float32))) + case reflect.Float64: + return WrapFloat64(val.(float64)) + case reflect.Bool: + if val.(bool) == true { + return &trueAny{} + } + return &falseAny{} + } + return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)} +} + +// ReadAny read next JSON element as an Any object. It is a better json.RawMessage. 
+func (iter *Iterator) ReadAny() Any { + return iter.readAny() +} + +func (iter *Iterator) readAny() Any { + c := iter.nextToken() + switch c { + case '"': + iter.unreadByte() + return &stringAny{baseAny{}, iter.ReadString()} + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + return &nilAny{} + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + return &trueAny{} + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + return &falseAny{} + case '{': + return iter.readObjectAny() + case '[': + return iter.readArrayAny() + case '-': + return iter.readNumberAny(false) + case 0: + return &invalidAny{baseAny{}, errors.New("input is empty")} + default: + return iter.readNumberAny(true) + } +} + +func (iter *Iterator) readNumberAny(positive bool) Any { + iter.startCapture(iter.head - 1) + iter.skipNumber() + lazyBuf := iter.stopCapture() + return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readObjectAny() Any { + iter.startCapture(iter.head - 1) + iter.skipObject() + lazyBuf := iter.stopCapture() + return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readArrayAny() Any { + iter.startCapture(iter.head - 1) + iter.skipArray() + lazyBuf := iter.stopCapture() + return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func locateObjectField(iter *Iterator, target string) []byte { + var found []byte + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + if field == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + return true + }) + return found +} + +func locateArrayElement(iter *Iterator, target int) []byte { + var found []byte + n := 0 + iter.ReadArrayCB(func(iter *Iterator) bool { + if n == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + n++ + return true + }) + return found +} + +func locatePath(iter *Iterator, path []interface{}) Any { + for i, pathKeyObj := range path { + switch pathKey := pathKeyObj.(type) { + case string: + valueBytes := locateObjectField(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int: + valueBytes := locateArrayElement(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int32: + if '*' == pathKey { + return iter.readAny().Get(path[i:]...) 
+ } + return newInvalidAny(path[i:]) + default: + return newInvalidAny(path[i:]) + } + } + if iter.Error != nil && iter.Error != io.EOF { + return &invalidAny{baseAny{}, iter.Error} + } + return iter.readAny() +} + +var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem() + +func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +type anyCodec struct { + valType reflect2.Type +} + +func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + panic("not implemented") +} + +func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + any.WriteTo(stream) +} + +func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + return any.Size() == 0 +} + +type directAnyCodec struct { +} + +func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *(*Any)(ptr) = iter.readAny() +} + +func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + any := *(*Any)(ptr) + if any == nil { + stream.WriteNil() + return + } + any.WriteTo(stream) +} + +func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool { + any := *(*Any)(ptr) + return any.Size() == 0 +} diff --git a/vendor/github.com/json-iterator/go/any_array.go b/vendor/github.com/json-iterator/go/any_array.go new file mode 100644 index 000000000..0449e9aa4 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_array.go @@ -0,0 +1,278 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type arrayLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *arrayLazyAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayLazyAny) MustBeValid() Any { + return any +} + +func (any *arrayLazyAny) LastError() error { + return any.err +} + +func (any *arrayLazyAny) ToBool() bool { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.ReadArray() +} + +func (any *arrayLazyAny) ToInt() int { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt32() int32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt64() int64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint() uint { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint32() uint32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint64() uint64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat32() float32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat64() float64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *arrayLazyAny) ToVal(val interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(val) +} + +func (any *arrayLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + iter := any.cfg.BorrowIterator(any.buf) 
+ defer any.cfg.ReturnIterator(iter) + valueBytes := locateArrayElement(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + arr := make([]Any, 0) + iter.ReadArrayCB(func(iter *Iterator) bool { + found := iter.readAny().Get(path[1:]...) + if found.ValueType() != InvalidValue { + arr = append(arr, found) + } + return true + }) + return wrapArray(arr) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadArrayCB(func(iter *Iterator) bool { + size++ + iter.Skip() + return true + }) + return size +} + +func (any *arrayLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *arrayLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type arrayAny struct { + baseAny + val reflect.Value +} + +func wrapArray(val interface{}) *arrayAny { + return &arrayAny{baseAny{}, reflect.ValueOf(val)} +} + +func (any *arrayAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayAny) MustBeValid() Any { + return any +} + +func (any *arrayAny) LastError() error { + return nil +} + +func (any *arrayAny) ToBool() bool { + return any.val.Len() != 0 +} + +func (any *arrayAny) ToInt() int { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt32() int32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt64() int64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint() uint { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint32() uint32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint64() uint64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat32() float32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat64() float64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToString() string { + str, _ := MarshalToString(any.val.Interface()) + return str +} + +func (any *arrayAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + if firstPath < 0 || firstPath >= any.val.Len() { + return newInvalidAny(path) + } + return Wrap(any.val.Index(firstPath).Interface()) + case int32: + if '*' == firstPath { + mappedAll := make([]Any, 0) + for i := 0; i < any.val.Len(); i++ { + mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...) 
+ if mapped.ValueType() != InvalidValue { + mappedAll = append(mappedAll, mapped) + } + } + return wrapArray(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayAny) Size() int { + return any.val.Len() +} + +func (any *arrayAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *arrayAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/vendor/github.com/json-iterator/go/any_bool.go b/vendor/github.com/json-iterator/go/any_bool.go new file mode 100644 index 000000000..9452324af --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_bool.go @@ -0,0 +1,137 @@ +package jsoniter + +type trueAny struct { + baseAny +} + +func (any *trueAny) LastError() error { + return nil +} + +func (any *trueAny) ToBool() bool { + return true +} + +func (any *trueAny) ToInt() int { + return 1 +} + +func (any *trueAny) ToInt32() int32 { + return 1 +} + +func (any *trueAny) ToInt64() int64 { + return 1 +} + +func (any *trueAny) ToUint() uint { + return 1 +} + +func (any *trueAny) ToUint32() uint32 { + return 1 +} + +func (any *trueAny) ToUint64() uint64 { + return 1 +} + +func (any *trueAny) ToFloat32() float32 { + return 1 +} + +func (any *trueAny) ToFloat64() float64 { + return 1 +} + +func (any *trueAny) ToString() string { + return "true" +} + +func (any *trueAny) WriteTo(stream *Stream) { + stream.WriteTrue() +} + +func (any *trueAny) Parse() *Iterator { + return nil +} + +func (any *trueAny) GetInterface() interface{} { + return true +} + +func (any *trueAny) ValueType() ValueType { + return BoolValue +} + +func (any *trueAny) MustBeValid() Any { + return any +} + +type falseAny struct { + baseAny +} + +func (any *falseAny) LastError() error { + return nil +} + +func (any *falseAny) ToBool() bool { + return false +} + +func (any *falseAny) ToInt() int { + return 0 +} + +func (any *falseAny) ToInt32() int32 { + return 0 +} + +func (any *falseAny) ToInt64() int64 { + return 0 +} + +func (any *falseAny) ToUint() uint { + return 0 +} + +func (any *falseAny) ToUint32() uint32 { + return 0 +} + +func (any *falseAny) ToUint64() uint64 { + return 0 +} + +func (any *falseAny) ToFloat32() float32 { + return 0 +} + +func (any *falseAny) ToFloat64() float64 { + return 0 +} + +func (any *falseAny) ToString() string { + return "false" +} + +func (any *falseAny) WriteTo(stream *Stream) { + stream.WriteFalse() +} + +func (any *falseAny) Parse() *Iterator { + return nil +} + +func (any *falseAny) GetInterface() interface{} { + return false +} + +func (any *falseAny) ValueType() ValueType { + return BoolValue +} + +func (any *falseAny) MustBeValid() Any { + return any +} diff --git a/vendor/github.com/json-iterator/go/any_float.go b/vendor/github.com/json-iterator/go/any_float.go new file mode 100644 index 000000000..35fdb0949 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_float.go @@ -0,0 +1,83 @@ +package jsoniter + +import ( + "strconv" +) + +type floatAny struct { + baseAny + val float64 +} + +func (any *floatAny) Parse() *Iterator { + return nil +} + +func (any *floatAny) ValueType() ValueType { + return NumberValue +} + +func (any *floatAny) MustBeValid() Any { + return any +} + +func (any *floatAny) LastError() error { + return nil +} + +func (any *floatAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *floatAny) ToInt() int { + return int(any.val) +} + +func (any *floatAny) ToInt32() int32 { + return int32(any.val) +} + +func (any *floatAny) ToInt64() int64 { + return int64(any.val) 
+} + +func (any *floatAny) ToUint() uint { + if any.val > 0 { + return uint(any.val) + } + return 0 +} + +func (any *floatAny) ToUint32() uint32 { + if any.val > 0 { + return uint32(any.val) + } + return 0 +} + +func (any *floatAny) ToUint64() uint64 { + if any.val > 0 { + return uint64(any.val) + } + return 0 +} + +func (any *floatAny) ToFloat32() float32 { + return float32(any.val) +} + +func (any *floatAny) ToFloat64() float64 { + return any.val +} + +func (any *floatAny) ToString() string { + return strconv.FormatFloat(any.val, 'E', -1, 64) +} + +func (any *floatAny) WriteTo(stream *Stream) { + stream.WriteFloat64(any.val) +} + +func (any *floatAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_int32.go b/vendor/github.com/json-iterator/go/any_int32.go new file mode 100644 index 000000000..1b56f3991 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_int32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int32Any struct { + baseAny + val int32 +} + +func (any *int32Any) LastError() error { + return nil +} + +func (any *int32Any) ValueType() ValueType { + return NumberValue +} + +func (any *int32Any) MustBeValid() Any { + return any +} + +func (any *int32Any) ToBool() bool { + return any.val != 0 +} + +func (any *int32Any) ToInt() int { + return int(any.val) +} + +func (any *int32Any) ToInt32() int32 { + return any.val +} + +func (any *int32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *int32Any) ToUint() uint { + return uint(any.val) +} + +func (any *int32Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *int32Any) WriteTo(stream *Stream) { + stream.WriteInt32(any.val) +} + +func (any *int32Any) Parse() *Iterator { + return nil +} + +func (any *int32Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_int64.go b/vendor/github.com/json-iterator/go/any_int64.go new file mode 100644 index 000000000..c440d72b6 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_int64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int64Any struct { + baseAny + val int64 +} + +func (any *int64Any) LastError() error { + return nil +} + +func (any *int64Any) ValueType() ValueType { + return NumberValue +} + +func (any *int64Any) MustBeValid() Any { + return any +} + +func (any *int64Any) ToBool() bool { + return any.val != 0 +} + +func (any *int64Any) ToInt() int { + return int(any.val) +} + +func (any *int64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *int64Any) ToInt64() int64 { + return any.val +} + +func (any *int64Any) ToUint() uint { + return uint(any.val) +} + +func (any *int64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int64Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int64Any) ToString() string { + return strconv.FormatInt(any.val, 10) +} + +func (any *int64Any) WriteTo(stream *Stream) { + stream.WriteInt64(any.val) +} + +func (any *int64Any) Parse() *Iterator { + return nil +} + 
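// exampleInt64AnyConversions is a minimal sketch (not part of the upstream
// json-iterator sources) showing the typed conversions offered by the
// int64Any wrapper defined in this file.
func exampleInt64AnyConversions() {
	a := WrapInt64(-42) // wraps a Go int64 in the Any interface
	_ = a.ToString()    // "-42", formatted with strconv.FormatInt
	_ = a.ToBool()      // true, because the wrapped value is non-zero
	_ = a.ToFloat64()   // -42.0
	_ = a.LastError()   // nil; plain wrappers never carry an error
}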
+func (any *int64Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_invalid.go b/vendor/github.com/json-iterator/go/any_invalid.go new file mode 100644 index 000000000..1d859eac3 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_invalid.go @@ -0,0 +1,82 @@ +package jsoniter + +import "fmt" + +type invalidAny struct { + baseAny + err error +} + +func newInvalidAny(path []interface{}) *invalidAny { + return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)} +} + +func (any *invalidAny) LastError() error { + return any.err +} + +func (any *invalidAny) ValueType() ValueType { + return InvalidValue +} + +func (any *invalidAny) MustBeValid() Any { + panic(any.err) +} + +func (any *invalidAny) ToBool() bool { + return false +} + +func (any *invalidAny) ToInt() int { + return 0 +} + +func (any *invalidAny) ToInt32() int32 { + return 0 +} + +func (any *invalidAny) ToInt64() int64 { + return 0 +} + +func (any *invalidAny) ToUint() uint { + return 0 +} + +func (any *invalidAny) ToUint32() uint32 { + return 0 +} + +func (any *invalidAny) ToUint64() uint64 { + return 0 +} + +func (any *invalidAny) ToFloat32() float32 { + return 0 +} + +func (any *invalidAny) ToFloat64() float64 { + return 0 +} + +func (any *invalidAny) ToString() string { + return "" +} + +func (any *invalidAny) WriteTo(stream *Stream) { +} + +func (any *invalidAny) Get(path ...interface{}) Any { + if any.err == nil { + return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)} + } + return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)} +} + +func (any *invalidAny) Parse() *Iterator { + return nil +} + +func (any *invalidAny) GetInterface() interface{} { + return nil +} diff --git a/vendor/github.com/json-iterator/go/any_nil.go b/vendor/github.com/json-iterator/go/any_nil.go new file mode 100644 index 000000000..d04cb54c1 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_nil.go @@ -0,0 +1,69 @@ +package jsoniter + +type nilAny struct { + baseAny +} + +func (any *nilAny) LastError() error { + return nil +} + +func (any *nilAny) ValueType() ValueType { + return NilValue +} + +func (any *nilAny) MustBeValid() Any { + return any +} + +func (any *nilAny) ToBool() bool { + return false +} + +func (any *nilAny) ToInt() int { + return 0 +} + +func (any *nilAny) ToInt32() int32 { + return 0 +} + +func (any *nilAny) ToInt64() int64 { + return 0 +} + +func (any *nilAny) ToUint() uint { + return 0 +} + +func (any *nilAny) ToUint32() uint32 { + return 0 +} + +func (any *nilAny) ToUint64() uint64 { + return 0 +} + +func (any *nilAny) ToFloat32() float32 { + return 0 +} + +func (any *nilAny) ToFloat64() float64 { + return 0 +} + +func (any *nilAny) ToString() string { + return "" +} + +func (any *nilAny) WriteTo(stream *Stream) { + stream.WriteNil() +} + +func (any *nilAny) Parse() *Iterator { + return nil +} + +func (any *nilAny) GetInterface() interface{} { + return nil +} diff --git a/vendor/github.com/json-iterator/go/any_number.go b/vendor/github.com/json-iterator/go/any_number.go new file mode 100644 index 000000000..9d1e901a6 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_number.go @@ -0,0 +1,123 @@ +package jsoniter + +import ( + "io" + "unsafe" +) + +type numberLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *numberLazyAny) ValueType() ValueType { + return NumberValue +} + +func (any *numberLazyAny) MustBeValid() Any { + return any +} + +func (any *numberLazyAny) 
LastError() error { + return any.err +} + +func (any *numberLazyAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *numberLazyAny) ToInt() int { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToInt32() int32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToInt64() int64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint() uint { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint32() uint32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint64() uint64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToFloat32() float32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToFloat64() float64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *numberLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *numberLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} diff --git a/vendor/github.com/json-iterator/go/any_object.go b/vendor/github.com/json-iterator/go/any_object.go new file mode 100644 index 000000000..c44ef5c98 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_object.go @@ -0,0 +1,374 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type objectLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *objectLazyAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectLazyAny) MustBeValid() Any { + return any +} + +func (any *objectLazyAny) LastError() error { + return any.err +} + +func (any *objectLazyAny) ToBool() bool { + return true +} + +func (any *objectLazyAny) ToInt() int { + return 0 +} + +func (any *objectLazyAny) ToInt32() int32 { + return 0 +} + +func (any *objectLazyAny) ToInt64() int64 { + return 0 +} + +func (any *objectLazyAny) ToUint() uint { + return 0 +} + +func (any *objectLazyAny) ToUint32() uint32 { + return 0 +} + +func (any *objectLazyAny) ToUint64() uint64 { + return 0 +} + +func (any *objectLazyAny) ToFloat32() float32 { + return 0 +} + +func (any *objectLazyAny) 
ToFloat64() float64 { + return 0 +} + +func (any *objectLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *objectLazyAny) ToVal(obj interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(obj) +} + +func (any *objectLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + valueBytes := locateObjectField(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + mapped := locatePath(iter, path[1:]) + if mapped.ValueType() != InvalidValue { + mappedAll[field] = mapped + } + return true + }) + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectLazyAny) Keys() []string { + keys := []string{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + iter.Skip() + keys = append(keys, field) + return true + }) + return keys +} + +func (any *objectLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + size++ + return true + }) + return size +} + +func (any *objectLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *objectLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type objectAny struct { + baseAny + err error + val reflect.Value +} + +func wrapStruct(val interface{}) *objectAny { + return &objectAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *objectAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectAny) MustBeValid() Any { + return any +} + +func (any *objectAny) Parse() *Iterator { + return nil +} + +func (any *objectAny) LastError() error { + return any.err +} + +func (any *objectAny) ToBool() bool { + return any.val.NumField() != 0 +} + +func (any *objectAny) ToInt() int { + return 0 +} + +func (any *objectAny) ToInt32() int32 { + return 0 +} + +func (any *objectAny) ToInt64() int64 { + return 0 +} + +func (any *objectAny) ToUint() uint { + return 0 +} + +func (any *objectAny) ToUint32() uint32 { + return 0 +} + +func (any *objectAny) ToUint64() uint64 { + return 0 +} + +func (any *objectAny) ToFloat32() float32 { + return 0 +} + +func (any *objectAny) ToFloat64() float64 { + return 0 +} + +func (any *objectAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *objectAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + field := any.val.FieldByName(firstPath) + if !field.IsValid() { + return newInvalidAny(path) + } + return Wrap(field.Interface()) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for i := 0; i < any.val.NumField(); i++ { + field := any.val.Field(i) + if field.CanInterface() { + mapped := Wrap(field.Interface()).Get(path[1:]...) 
+ if mapped.ValueType() != InvalidValue { + mappedAll[any.val.Type().Field(i).Name] = mapped + } + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectAny) Keys() []string { + keys := make([]string, 0, any.val.NumField()) + for i := 0; i < any.val.NumField(); i++ { + keys = append(keys, any.val.Type().Field(i).Name) + } + return keys +} + +func (any *objectAny) Size() int { + return any.val.NumField() +} + +func (any *objectAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *objectAny) GetInterface() interface{} { + return any.val.Interface() +} + +type mapAny struct { + baseAny + err error + val reflect.Value +} + +func wrapMap(val interface{}) *mapAny { + return &mapAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *mapAny) ValueType() ValueType { + return ObjectValue +} + +func (any *mapAny) MustBeValid() Any { + return any +} + +func (any *mapAny) Parse() *Iterator { + return nil +} + +func (any *mapAny) LastError() error { + return any.err +} + +func (any *mapAny) ToBool() bool { + return true +} + +func (any *mapAny) ToInt() int { + return 0 +} + +func (any *mapAny) ToInt32() int32 { + return 0 +} + +func (any *mapAny) ToInt64() int64 { + return 0 +} + +func (any *mapAny) ToUint() uint { + return 0 +} + +func (any *mapAny) ToUint32() uint32 { + return 0 +} + +func (any *mapAny) ToUint64() uint64 { + return 0 +} + +func (any *mapAny) ToFloat32() float32 { + return 0 +} + +func (any *mapAny) ToFloat64() float64 { + return 0 +} + +func (any *mapAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *mapAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for _, key := range any.val.MapKeys() { + keyAsStr := key.String() + element := Wrap(any.val.MapIndex(key).Interface()) + mapped := element.Get(path[1:]...) 
+ if mapped.ValueType() != InvalidValue { + mappedAll[keyAsStr] = mapped + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + value := any.val.MapIndex(reflect.ValueOf(firstPath)) + if !value.IsValid() { + return newInvalidAny(path) + } + return Wrap(value.Interface()) + } +} + +func (any *mapAny) Keys() []string { + keys := make([]string, 0, any.val.Len()) + for _, key := range any.val.MapKeys() { + keys = append(keys, key.String()) + } + return keys +} + +func (any *mapAny) Size() int { + return any.val.Len() +} + +func (any *mapAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *mapAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/vendor/github.com/json-iterator/go/any_str.go b/vendor/github.com/json-iterator/go/any_str.go new file mode 100644 index 000000000..1f12f6612 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_str.go @@ -0,0 +1,166 @@ +package jsoniter + +import ( + "fmt" + "strconv" +) + +type stringAny struct { + baseAny + val string +} + +func (any *stringAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} +} + +func (any *stringAny) Parse() *Iterator { + return nil +} + +func (any *stringAny) ValueType() ValueType { + return StringValue +} + +func (any *stringAny) MustBeValid() Any { + return any +} + +func (any *stringAny) LastError() error { + return nil +} + +func (any *stringAny) ToBool() bool { + str := any.ToString() + if str == "0" { + return false + } + for _, c := range str { + switch c { + case ' ', '\n', '\r', '\t': + default: + return true + } + } + return false +} + +func (any *stringAny) ToInt() int { + return int(any.ToInt64()) + +} + +func (any *stringAny) ToInt32() int32 { + return int32(any.ToInt64()) +} + +func (any *stringAny) ToInt64() int64 { + if any.val == "" { + return 0 + } + + flag := 1 + startPos := 0 + if any.val[0] == '+' || any.val[0] == '-' { + startPos = 1 + } + + if any.val[0] == '-' { + flag = -1 + } + + endPos := startPos + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64) + return int64(flag) * parsed +} + +func (any *stringAny) ToUint() uint { + return uint(any.ToUint64()) +} + +func (any *stringAny) ToUint32() uint32 { + return uint32(any.ToUint64()) +} + +func (any *stringAny) ToUint64() uint64 { + if any.val == "" { + return 0 + } + + startPos := 0 + + if any.val[0] == '-' { + return 0 + } + if any.val[0] == '+' { + startPos = 1 + } + + endPos := startPos + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64) + return parsed +} + +func (any *stringAny) ToFloat32() float32 { + return float32(any.ToFloat64()) +} + +func (any *stringAny) ToFloat64() float64 { + if len(any.val) == 0 { + return 0 + } + + // first char invalid + if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') { + return 0 + } + + // extract valid num expression from string + // eg 123true => 123, -12.12xxa => -12.12 + endPos := 1 + for i := 1; i < len(any.val); i++ { + if any.val[i] == '.' 
|| any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' { + endPos = i + 1 + continue + } + + // end position is the first char which is not digit + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + endPos = i + break + } + } + parsed, _ := strconv.ParseFloat(any.val[:endPos], 64) + return parsed +} + +func (any *stringAny) ToString() string { + return any.val +} + +func (any *stringAny) WriteTo(stream *Stream) { + stream.WriteString(any.val) +} + +func (any *stringAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_uint32.go b/vendor/github.com/json-iterator/go/any_uint32.go new file mode 100644 index 000000000..656bbd33d --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_uint32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint32Any struct { + baseAny + val uint32 +} + +func (any *uint32Any) LastError() error { + return nil +} + +func (any *uint32Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint32Any) MustBeValid() Any { + return any +} + +func (any *uint32Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint32Any) ToInt() int { + return int(any.val) +} + +func (any *uint32Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint32Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint32Any) ToUint32() uint32 { + return any.val +} + +func (any *uint32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *uint32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *uint32Any) WriteTo(stream *Stream) { + stream.WriteUint32(any.val) +} + +func (any *uint32Any) Parse() *Iterator { + return nil +} + +func (any *uint32Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_uint64.go b/vendor/github.com/json-iterator/go/any_uint64.go new file mode 100644 index 000000000..7df2fce33 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_uint64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint64Any struct { + baseAny + val uint64 +} + +func (any *uint64Any) LastError() error { + return nil +} + +func (any *uint64Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint64Any) MustBeValid() Any { + return any +} + +func (any *uint64Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint64Any) ToInt() int { + return int(any.val) +} + +func (any *uint64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint64Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint64Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *uint64Any) ToUint64() uint64 { + return any.val +} + +func (any *uint64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint64Any) ToString() string { + return strconv.FormatUint(any.val, 10) +} + +func (any *uint64Any) WriteTo(stream *Stream) { + stream.WriteUint64(any.val) +} + +func (any *uint64Any) Parse() *Iterator { + return nil +} + +func (any *uint64Any) GetInterface() interface{} { + return any.val +} diff --git 
a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh new file mode 100644 index 000000000..b45ef6883 --- /dev/null +++ b/vendor/github.com/json-iterator/go/build.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -e +set -x + +if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then + mkdir -p /tmp/build-golang/src/github.com/json-iterator + ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go +fi +export GOPATH=/tmp/build-golang +go get -u github.com/golang/dep/cmd/dep +cd /tmp/build-golang/src/github.com/json-iterator/go +exec $GOPATH/bin/dep ensure -update diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go new file mode 100644 index 000000000..2adcdc3b7 --- /dev/null +++ b/vendor/github.com/json-iterator/go/config.go @@ -0,0 +1,375 @@ +package jsoniter + +import ( + "encoding/json" + "io" + "reflect" + "sync" + "unsafe" + + "github.com/modern-go/concurrent" + "github.com/modern-go/reflect2" +) + +// Config customize how the API should behave. +// The API is created from Config by Froze. +type Config struct { + IndentionStep int + MarshalFloatWith6Digits bool + EscapeHTML bool + SortMapKeys bool + UseNumber bool + DisallowUnknownFields bool + TagKey string + OnlyTaggedField bool + ValidateJsonRawMessage bool + ObjectFieldMustBeSimpleString bool + CaseSensitive bool +} + +// API the public interface of this package. +// Primary Marshal and Unmarshal. +type API interface { + IteratorPool + StreamPool + MarshalToString(v interface{}) (string, error) + Marshal(v interface{}) ([]byte, error) + MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) + UnmarshalFromString(str string, v interface{}) error + Unmarshal(data []byte, v interface{}) error + Get(data []byte, path ...interface{}) Any + NewEncoder(writer io.Writer) *Encoder + NewDecoder(reader io.Reader) *Decoder + Valid(data []byte) bool + RegisterExtension(extension Extension) + DecoderOf(typ reflect2.Type) ValDecoder + EncoderOf(typ reflect2.Type) ValEncoder +} + +// ConfigDefault the default API +var ConfigDefault = Config{ + EscapeHTML: true, +}.Froze() + +// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior +var ConfigCompatibleWithStandardLibrary = Config{ + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, +}.Froze() + +// ConfigFastest marshals float with only 6 digits precision +var ConfigFastest = Config{ + EscapeHTML: false, + MarshalFloatWith6Digits: true, // will lose precession + ObjectFieldMustBeSimpleString: true, // do not unescape object field +}.Froze() + +type frozenConfig struct { + configBeforeFrozen Config + sortMapKeys bool + indentionStep int + objectFieldMustBeSimpleString bool + onlyTaggedField bool + disallowUnknownFields bool + decoderCache *concurrent.Map + encoderCache *concurrent.Map + encoderExtension Extension + decoderExtension Extension + extraExtensions []Extension + streamPool *sync.Pool + iteratorPool *sync.Pool + caseSensitive bool +} + +func (cfg *frozenConfig) initCache() { + cfg.decoderCache = concurrent.NewMap() + cfg.encoderCache = concurrent.NewMap() +} + +func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) { + cfg.decoderCache.Store(cacheKey, decoder) +} + +func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) { + cfg.encoderCache.Store(cacheKey, encoder) +} + +func (cfg *frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder { + decoder, found := 
cfg.decoderCache.Load(cacheKey) + if found { + return decoder.(ValDecoder) + } + return nil +} + +func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder { + encoder, found := cfg.encoderCache.Load(cacheKey) + if found { + return encoder.(ValEncoder) + } + return nil +} + +var cfgCache = concurrent.NewMap() + +func getFrozenConfigFromCache(cfg Config) *frozenConfig { + obj, found := cfgCache.Load(cfg) + if found { + return obj.(*frozenConfig) + } + return nil +} + +func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) { + cfgCache.Store(cfg, frozenConfig) +} + +// Froze forge API from config +func (cfg Config) Froze() API { + api := &frozenConfig{ + sortMapKeys: cfg.SortMapKeys, + indentionStep: cfg.IndentionStep, + objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString, + onlyTaggedField: cfg.OnlyTaggedField, + disallowUnknownFields: cfg.DisallowUnknownFields, + caseSensitive: cfg.CaseSensitive, + } + api.streamPool = &sync.Pool{ + New: func() interface{} { + return NewStream(api, nil, 512) + }, + } + api.iteratorPool = &sync.Pool{ + New: func() interface{} { + return NewIterator(api) + }, + } + api.initCache() + encoderExtension := EncoderExtension{} + decoderExtension := DecoderExtension{} + if cfg.MarshalFloatWith6Digits { + api.marshalFloatWith6Digits(encoderExtension) + } + if cfg.EscapeHTML { + api.escapeHTML(encoderExtension) + } + if cfg.UseNumber { + api.useNumber(decoderExtension) + } + if cfg.ValidateJsonRawMessage { + api.validateJsonRawMessage(encoderExtension) + } + api.encoderExtension = encoderExtension + api.decoderExtension = decoderExtension + api.configBeforeFrozen = cfg + return api +} + +func (cfg Config) frozeWithCacheReuse(extraExtensions []Extension) *frozenConfig { + api := getFrozenConfigFromCache(cfg) + if api != nil { + return api + } + api = cfg.Froze().(*frozenConfig) + for _, extension := range extraExtensions { + api.RegisterExtension(extension) + } + addFrozenConfigToCache(cfg, api) + return api +} + +func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) { + encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) { + rawMessage := *(*json.RawMessage)(ptr) + iter := cfg.BorrowIterator([]byte(rawMessage)) + defer cfg.ReturnIterator(iter) + iter.Read() + if iter.Error != nil && iter.Error != io.EOF { + stream.WriteRaw("null") + } else { + stream.WriteRaw(string(rawMessage)) + } + }, func(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 + }} + extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder + extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder +} + +func (cfg *frozenConfig) useNumber(extension DecoderExtension) { + extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { + exitingValue := *((*interface{})(ptr)) + if exitingValue != nil && reflect.TypeOf(exitingValue).Kind() == reflect.Ptr { + iter.ReadVal(exitingValue) + return + } + if iter.WhatIsNext() == NumberValue { + *((*interface{})(ptr)) = json.Number(iter.readNumberAsString()) + } else { + *((*interface{})(ptr)) = iter.Read() + } + }} +} +func (cfg *frozenConfig) getTagKey() string { + tagKey := cfg.configBeforeFrozen.TagKey + if tagKey == "" { + return "json" + } + return tagKey +} + +func (cfg *frozenConfig) RegisterExtension(extension Extension) { + cfg.extraExtensions = append(cfg.extraExtensions, extension) + copied := cfg.configBeforeFrozen + cfg.configBeforeFrozen = copied +} + +type 
lossyFloat32Encoder struct { +} + +func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32Lossy(*((*float32)(ptr))) +} + +func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type lossyFloat64Encoder struct { +} + +func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64Lossy(*((*float64)(ptr))) +} + +func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +// EnableLossyFloatMarshalling keeps 10**(-6) precision +// for float variables for better performance. +func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) { + // for better performance + extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{} + extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{} +} + +type htmlEscapedStringEncoder struct { +} + +func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteStringWithHTMLEscaped(str) +} + +func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) { + encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{} +} + +func (cfg *frozenConfig) cleanDecoders() { + typeDecoders = map[string]ValDecoder{} + fieldDecoders = map[string]ValDecoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) cleanEncoders() { + typeEncoders = map[string]ValEncoder{} + fieldEncoders = map[string]ValEncoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return "", stream.Error + } + return string(stream.Buffer()), nil +} + +func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return nil, stream.Error + } + result := stream.Buffer() + copied := make([]byte, len(result)) + copy(copied, result) + return copied, nil +} + +func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + if prefix != "" { + panic("prefix is not supported") + } + for _, r := range indent { + if r != ' ' { + panic("indent can only be space") + } + } + newCfg := cfg.configBeforeFrozen + newCfg.IndentionStep = len(indent) + return newCfg.frozeWithCacheReuse(cfg.extraExtensions).Marshal(v) +} + +func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error { + data := []byte(str) + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + c := iter.nextToken() + if c == 0 { + if iter.Error == io.EOF { + return nil + } + return iter.Error + } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + return iter.Error +} + +func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + return locatePath(iter, path) +} + +func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + c := iter.nextToken() + if c == 
0 { + if iter.Error == io.EOF { + return nil + } + return iter.Error + } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + return iter.Error +} + +func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder { + stream := NewStream(cfg, writer, 512) + return &Encoder{stream} +} + +func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder { + iter := Parse(cfg, reader, 512) + return &Decoder{iter} +} + +func (cfg *frozenConfig) Valid(data []byte) bool { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.Skip() + return iter.Error == nil +} diff --git a/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md new file mode 100644 index 000000000..3095662b0 --- /dev/null +++ b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md @@ -0,0 +1,7 @@ +| json type \ dest type | bool | int | uint | float |string| +| --- | --- | --- | --- |--|--| +| number | positive => true
<br/> negative => true<br/> zero => false| 23.2 => 23<br/> -32.1 => -32| 12.1 => 12<br/> -12.1 => 0|as normal|same as origin|
+| string | empty string => false<br/> string "0" => false<br/> other strings => true | "123.32" => 123<br/> "-123.4" => -123<br/> "123.23xxxw" => 123<br/> "abcde12" => 0<br/> "-32.1" => -32| 13.2 => 13<br/> -1.1 => 0 |12.1 => 12.1<br/> -12.3 => -12.3<br/> 12.4xxa => 12.4<br/> +1.1e2 =>110 |same as origin|
+| bool | true => true<br/> false => false| true => 1<br/> false => 0 | true => 1<br/> false => 0 |true => 1<br/> false => 0|true => "true"<br/> false => "false"|
+| object | true | 0 | 0 |0|original json|
+| array | empty array => false<br/> nonempty array => true| [] => 0<br/> [1,2] => 1 | [] => 0<br/> [1,2] => 1 |[] => 0<br/>
[1,2] => 1|original json| \ No newline at end of file diff --git a/vendor/github.com/json-iterator/go/go.mod b/vendor/github.com/json-iterator/go/go.mod new file mode 100644 index 000000000..e05c42ff5 --- /dev/null +++ b/vendor/github.com/json-iterator/go/go.mod @@ -0,0 +1,11 @@ +module github.com/json-iterator/go + +go 1.12 + +require ( + github.com/davecgh/go-spew v1.1.1 + github.com/google/gofuzz v1.0.0 + github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 + github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 + github.com/stretchr/testify v1.3.0 +) diff --git a/vendor/github.com/json-iterator/go/go.sum b/vendor/github.com/json-iterator/go/go.sum new file mode 100644 index 000000000..d778b5a14 --- /dev/null +++ b/vendor/github.com/json-iterator/go/go.sum @@ -0,0 +1,14 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/json-iterator/go/iter.go b/vendor/github.com/json-iterator/go/iter.go new file mode 100644 index 000000000..29b31cf78 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter.go @@ -0,0 +1,349 @@ +package jsoniter + +import ( + "encoding/json" + "fmt" + "io" +) + +// ValueType the type for JSON element +type ValueType int + +const ( + // InvalidValue invalid JSON element + InvalidValue ValueType = iota + // StringValue JSON element "string" + StringValue + // NumberValue JSON element 100 or 0.10 + NumberValue + // NilValue JSON element null + NilValue + // BoolValue JSON element true or false + BoolValue + // ArrayValue JSON element [] + ArrayValue + // ObjectValue JSON element {} + ObjectValue +) + +var hexDigits []byte +var valueTypes []ValueType + +func init() { + hexDigits = make([]byte, 256) + for i := 0; i < len(hexDigits); i++ { + hexDigits[i] = 255 + } + for i := '0'; i <= '9'; i++ { + hexDigits[i] = byte(i - '0') + } + for i := 'a'; i <= 'f'; i++ { + hexDigits[i] = byte((i - 'a') + 10) + } + for i := 'A'; i <= 'F'; i++ { + hexDigits[i] = byte((i - 'A') + 10) + } + valueTypes = make([]ValueType, 256) + for i := 0; i < len(valueTypes); i++ { + valueTypes[i] = InvalidValue + } + valueTypes['"'] = StringValue + valueTypes['-'] = NumberValue + valueTypes['0'] = NumberValue + valueTypes['1'] = NumberValue + valueTypes['2'] = 
NumberValue + valueTypes['3'] = NumberValue + valueTypes['4'] = NumberValue + valueTypes['5'] = NumberValue + valueTypes['6'] = NumberValue + valueTypes['7'] = NumberValue + valueTypes['8'] = NumberValue + valueTypes['9'] = NumberValue + valueTypes['t'] = BoolValue + valueTypes['f'] = BoolValue + valueTypes['n'] = NilValue + valueTypes['['] = ArrayValue + valueTypes['{'] = ObjectValue +} + +// Iterator is a io.Reader like object, with JSON specific read functions. +// Error is not returned as return value, but stored as Error member on this iterator instance. +type Iterator struct { + cfg *frozenConfig + reader io.Reader + buf []byte + head int + tail int + depth int + captureStartedAt int + captured []byte + Error error + Attachment interface{} // open for customized decoder +} + +// NewIterator creates an empty Iterator instance +func NewIterator(cfg API) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: nil, + head: 0, + tail: 0, + depth: 0, + } +} + +// Parse creates an Iterator instance from io.Reader +func Parse(cfg API, reader io.Reader, bufSize int) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: reader, + buf: make([]byte, bufSize), + head: 0, + tail: 0, + depth: 0, + } +} + +// ParseBytes creates an Iterator instance from byte array +func ParseBytes(cfg API, input []byte) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: input, + head: 0, + tail: len(input), + depth: 0, + } +} + +// ParseString creates an Iterator instance from string +func ParseString(cfg API, input string) *Iterator { + return ParseBytes(cfg, []byte(input)) +} + +// Pool returns a pool can provide more iterator with same configuration +func (iter *Iterator) Pool() IteratorPool { + return iter.cfg +} + +// Reset reuse iterator instance by specifying another reader +func (iter *Iterator) Reset(reader io.Reader) *Iterator { + iter.reader = reader + iter.head = 0 + iter.tail = 0 + iter.depth = 0 + return iter +} + +// ResetBytes reuse iterator instance by specifying another byte array as input +func (iter *Iterator) ResetBytes(input []byte) *Iterator { + iter.reader = nil + iter.buf = input + iter.head = 0 + iter.tail = len(input) + iter.depth = 0 + return iter +} + +// WhatIsNext gets ValueType of relatively next json element +func (iter *Iterator) WhatIsNext() ValueType { + valueType := valueTypes[iter.nextToken()] + iter.unreadByte() + return valueType +} + +func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + return false + } + return true +} + +func (iter *Iterator) isObjectEnd() bool { + c := iter.nextToken() + if c == ',' { + return false + } + if c == '}' { + return true + } + iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c})) + return true +} + +func (iter *Iterator) nextToken() byte { + // a variation of skip whitespaces, returning the next non-whitespace token + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + 1 + return c + } + if !iter.loadMore() { + return 0 + } + } +} + +// ReportError record a error in iterator instance with current position. 
+func (iter *Iterator) ReportError(operation string, msg string) { + if iter.Error != nil { + if iter.Error != io.EOF { + return + } + } + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + peekEnd := iter.head + 10 + if peekEnd > iter.tail { + peekEnd = iter.tail + } + parsing := string(iter.buf[peekStart:peekEnd]) + contextStart := iter.head - 50 + if contextStart < 0 { + contextStart = 0 + } + contextEnd := iter.head + 50 + if contextEnd > iter.tail { + contextEnd = iter.tail + } + context := string(iter.buf[contextStart:contextEnd]) + iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...", + operation, msg, iter.head-peekStart, parsing, context) +} + +// CurrentBuffer gets current buffer as string for debugging purpose +func (iter *Iterator) CurrentBuffer() string { + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head, + string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail])) +} + +func (iter *Iterator) readByte() (ret byte) { + if iter.head == iter.tail { + if iter.loadMore() { + ret = iter.buf[iter.head] + iter.head++ + return ret + } + return 0 + } + ret = iter.buf[iter.head] + iter.head++ + return ret +} + +func (iter *Iterator) loadMore() bool { + if iter.reader == nil { + if iter.Error == nil { + iter.head = iter.tail + iter.Error = io.EOF + } + return false + } + if iter.captured != nil { + iter.captured = append(iter.captured, + iter.buf[iter.captureStartedAt:iter.tail]...) + iter.captureStartedAt = 0 + } + for { + n, err := iter.reader.Read(iter.buf) + if n == 0 { + if err != nil { + if iter.Error == nil { + iter.Error = err + } + return false + } + } else { + iter.head = 0 + iter.tail = n + return true + } + } +} + +func (iter *Iterator) unreadByte() { + if iter.Error != nil { + return + } + iter.head-- + return +} + +// Read read the next JSON element as generic interface{}. 
+func (iter *Iterator) Read() interface{} { + valueType := iter.WhatIsNext() + switch valueType { + case StringValue: + return iter.ReadString() + case NumberValue: + if iter.cfg.configBeforeFrozen.UseNumber { + return json.Number(iter.readNumberAsString()) + } + return iter.ReadFloat64() + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + return nil + case BoolValue: + return iter.ReadBool() + case ArrayValue: + arr := []interface{}{} + iter.ReadArrayCB(func(iter *Iterator) bool { + var elem interface{} + iter.ReadVal(&elem) + arr = append(arr, elem) + return true + }) + return arr + case ObjectValue: + obj := map[string]interface{}{} + iter.ReadMapCB(func(Iter *Iterator, field string) bool { + var elem interface{} + iter.ReadVal(&elem) + obj[field] = elem + return true + }) + return obj + default: + iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType)) + return nil + } +} + +// limit maximum depth of nesting, as allowed by https://tools.ietf.org/html/rfc7159#section-9 +const maxDepth = 10000 + +func (iter *Iterator) incrementDepth() (success bool) { + iter.depth++ + if iter.depth <= maxDepth { + return true + } + iter.ReportError("incrementDepth", "exceeded max depth") + return false +} + +func (iter *Iterator) decrementDepth() (success bool) { + iter.depth-- + if iter.depth >= 0 { + return true + } + iter.ReportError("decrementDepth", "unexpected negative nesting") + return false +} diff --git a/vendor/github.com/json-iterator/go/iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go new file mode 100644 index 000000000..204fe0e09 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_array.go @@ -0,0 +1,64 @@ +package jsoniter + +// ReadArray read array element, tells if the array has more element to read. 
+func (iter *Iterator) ReadArray() (ret bool) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return false // null + case '[': + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + return true + } + return false + case ']': + return false + case ',': + return true + default: + iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c})) + return + } +} + +// ReadArrayCB read array with callback +func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { + c := iter.nextToken() + if c == '[' { + if !iter.incrementDepth() { + return false + } + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + if !callback(iter) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + for c == ',' { + if !callback(iter) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + } + if c != ']' { + iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c})) + iter.decrementDepth() + return false + } + return iter.decrementDepth() + } + return iter.decrementDepth() + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c})) + return false +} diff --git a/vendor/github.com/json-iterator/go/iter_float.go b/vendor/github.com/json-iterator/go/iter_float.go new file mode 100644 index 000000000..b9754638e --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_float.go @@ -0,0 +1,339 @@ +package jsoniter + +import ( + "encoding/json" + "io" + "math/big" + "strconv" + "strings" + "unsafe" +) + +var floatDigits []int8 + +const invalidCharForNumber = int8(-1) +const endOfNumber = int8(-2) +const dotInNumber = int8(-3) + +func init() { + floatDigits = make([]int8, 256) + for i := 0; i < len(floatDigits); i++ { + floatDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + floatDigits[i] = i - int8('0') + } + floatDigits[','] = endOfNumber + floatDigits[']'] = endOfNumber + floatDigits['}'] = endOfNumber + floatDigits[' '] = endOfNumber + floatDigits['\t'] = endOfNumber + floatDigits['\n'] = endOfNumber + floatDigits['.'] = dotInNumber +} + +// ReadBigFloat read big.Float +func (iter *Iterator) ReadBigFloat() (ret *big.Float) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + prec := 64 + if len(str) > prec { + prec = len(str) + } + val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero) + if err != nil { + iter.Error = err + return nil + } + return val +} + +// ReadBigInt read big.Int +func (iter *Iterator) ReadBigInt() (ret *big.Int) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + ret = big.NewInt(0) + var success bool + ret, success = ret.SetString(str, 10) + if !success { + iter.ReportError("ReadBigInt", "invalid big int") + return nil + } + return ret +} + +//ReadFloat32 read float32 +func (iter *Iterator) ReadFloat32() (ret float32) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat32() + } + iter.unreadByte() + return iter.readPositiveFloat32() +} + +func (iter *Iterator) readPositiveFloat32() (ret float32) { + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat32SlowPath() + } + c := iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.ReportError("readFloat32", "empty 
number") + return + case dotInNumber: + iter.ReportError("readFloat32", "leading dot is invalid") + return + case 0: + if i == iter.tail { + return iter.readFloat32SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat32", "leading zero is invalid") + return + } + } + value := uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.head = i + return float32(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' { + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat32SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float32(float64(value) / float64(pow10[decimalPlaces])) + } + // too many decimal places + return iter.readFloat32SlowPath() + case invalidCharForNumber, dotInNumber: + return iter.readFloat32SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + } + } + return iter.readFloat32SlowPath() +} + +func (iter *Iterator) readNumberAsString() (ret string) { + strBuf := [16]byte{} + str := strBuf[0:0] +load_loop: + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + str = append(str, c) + continue + default: + iter.head = i + break load_loop + } + } + if !iter.loadMore() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + return + } + if len(str) == 0 { + iter.ReportError("readNumberAsString", "invalid number") + } + return *(*string)(unsafe.Pointer(&str)) +} + +func (iter *Iterator) readFloat32SlowPath() (ret float32) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat32SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 32) + if err != nil { + iter.Error = err + return + } + return float32(val) +} + +// ReadFloat64 read float64 +func (iter *Iterator) ReadFloat64() (ret float64) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat64() + } + iter.unreadByte() + return iter.readPositiveFloat64() +} + +func (iter *Iterator) readPositiveFloat64() (ret float64) { + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c := iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + iter.ReportError("readFloat64", "empty number") + return + case dotInNumber: + iter.ReportError("readFloat64", "leading dot is invalid") + return + case 0: + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat64", "leading zero is invalid") + return + } + } + value := uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = 
iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + iter.head = i + return float64(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' { + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat64SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float64(value) / float64(pow10[decimalPlaces]) + } + // too many decimal places + return iter.readFloat64SlowPath() + case invalidCharForNumber, dotInNumber: + return iter.readFloat64SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + } + } + return iter.readFloat64SlowPath() +} + +func (iter *Iterator) readFloat64SlowPath() (ret float64) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat64SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 64) + if err != nil { + iter.Error = err + return + } + return val +} + +func validateFloat(str string) string { + // strconv.ParseFloat is not validating `1.` or `1.e1` + if len(str) == 0 { + return "empty number" + } + if str[0] == '-' { + return "-- is not valid" + } + dotPos := strings.IndexByte(str, '.') + if dotPos != -1 { + if dotPos == len(str)-1 { + return "dot can not be last character" + } + switch str[dotPos+1] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + return "missing digit after dot" + } + } + return "" +} + +// ReadNumber read json.Number +func (iter *Iterator) ReadNumber() (ret json.Number) { + return json.Number(iter.readNumberAsString()) +} diff --git a/vendor/github.com/json-iterator/go/iter_int.go b/vendor/github.com/json-iterator/go/iter_int.go new file mode 100644 index 000000000..214232035 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_int.go @@ -0,0 +1,345 @@ +package jsoniter + +import ( + "math" + "strconv" +) + +var intDigits []int8 + +const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1 +const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1 + +func init() { + intDigits = make([]int8, 256) + for i := 0; i < len(intDigits); i++ { + intDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + intDigits[i] = i - int8('0') + } +} + +// ReadUint read uint +func (iter *Iterator) ReadUint() uint { + if strconv.IntSize == 32 { + return uint(iter.ReadUint32()) + } + return uint(iter.ReadUint64()) +} + +// ReadInt read int +func (iter *Iterator) ReadInt() int { + if strconv.IntSize == 32 { + return int(iter.ReadInt32()) + } + return int(iter.ReadInt64()) +} + +// ReadInt8 read int8 +func (iter *Iterator) ReadInt8() (ret int8) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt8+1 { + iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int8(val) + } + val := iter.readUint32(c) + if val > math.MaxInt8 { + iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int8(val) +} 
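A minimal caller-side sketch (illustrative only, not part of the vendored file) of how the bounded integer readers above behave; it assumes the package is imported as jsoniter and that jsoniter.ConfigDefault is available, as in upstream json-iterator:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// An in-range value decodes normally.
	it := jsoniter.ParseString(jsoniter.ConfigDefault, "120 ")
	fmt.Println(it.ReadInt8()) // 120

	// An out-of-range value leaves the zero value and records an error on the iterator.
	it = jsoniter.ParseString(jsoniter.ConfigDefault, "300 ")
	fmt.Println(it.ReadInt8(), it.Error != nil) // 0 true
}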
+ +// ReadUint8 read uint8 +func (iter *Iterator) ReadUint8() (ret uint8) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint8 { + iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint8(val) +} + +// ReadInt16 read int16 +func (iter *Iterator) ReadInt16() (ret int16) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt16+1 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int16(val) + } + val := iter.readUint32(c) + if val > math.MaxInt16 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int16(val) +} + +// ReadUint16 read uint16 +func (iter *Iterator) ReadUint16() (ret uint16) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint16 { + iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint16(val) +} + +// ReadInt32 read int32 +func (iter *Iterator) ReadInt32() (ret int32) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt32+1 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int32(val) + } + val := iter.readUint32(c) + if val > math.MaxInt32 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int32(val) +} + +// ReadUint32 read uint32 +func (iter *Iterator) ReadUint32() (ret uint32) { + return iter.readUint32(iter.nextToken()) +} + +func (iter *Iterator) readUint32(c byte) (ret uint32) { + ind := intDigits[c] + if ind == 0 { + iter.assertInteger() + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint32(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10 + uint32(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100 + uint32(ind2)*10 + uint32(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + 
uint32(ind7)*10 + uint32(ind8) + iter.head = i + if ind9 == invalidCharForNumber { + iter.assertInteger() + return value + } + } + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + if value > uint32SafeToMultiply10 { + value2 := (value << 3) + (value << 1) + uint32(ind) + if value2 < value { + iter.ReportError("readUint32", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) + (value << 1) + uint32(ind) + } + if !iter.loadMore() { + iter.assertInteger() + return value + } + } +} + +// ReadInt64 read int64 +func (iter *Iterator) ReadInt64() (ret int64) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint64(iter.readByte()) + if val > math.MaxInt64+1 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return -int64(val) + } + val := iter.readUint64(c) + if val > math.MaxInt64 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return int64(val) +} + +// ReadUint64 read uint64 +func (iter *Iterator) ReadUint64() uint64 { + return iter.readUint64(iter.nextToken()) +} + +func (iter *Iterator) readUint64(c byte) (ret uint64) { + ind := intDigits[c] + if ind == 0 { + iter.assertInteger() + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint64(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10 + uint64(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100 + uint64(ind2)*10 + uint64(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8) + iter.head = i + if ind9 == invalidCharForNumber { + iter.assertInteger() + return value + } + } + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + if value > uint64SafeToMultiple10 { + value2 := (value << 3) + 
(value << 1) + uint64(ind) + if value2 < value { + iter.ReportError("readUint64", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) + (value << 1) + uint64(ind) + } + if !iter.loadMore() { + iter.assertInteger() + return value + } + } +} + +func (iter *Iterator) assertInteger() { + if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' { + iter.ReportError("assertInteger", "can not decode float as int") + } +} diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go new file mode 100644 index 000000000..58ee89c84 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_object.go @@ -0,0 +1,267 @@ +package jsoniter + +import ( + "fmt" + "strings" +) + +// ReadObject read one field from object. +// If object ended, returns empty string. +// Otherwise, returns the field name. +func (iter *Iterator) ReadObject() (ret string) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return "" // null + case '{': + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + return field + } + if c == '}' { + return "" // end of object + } + iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c})) + return + case ',': + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + return field + case '}': + return "" // end of object + default: + iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c}))) + return + } +} + +// CaseInsensitive +func (iter *Iterator) readFieldHash() int64 { + hash := int64(0x811c9dc5) + c := iter.nextToken() + if c != '"' { + iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c})) + return 0 + } + for { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + b := iter.buf[i] + if b == '\\' { + iter.head = i + for _, b := range iter.readStringSlowPath() { + if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { + b += 'a' - 'A' + } + hash ^= int64(b) + hash *= 0x1000193 + } + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 + } + return hash + } + if b == '"' { + iter.head = i + 1 + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 + } + return hash + } + if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { + b += 'a' - 'A' + } + hash ^= int64(b) + hash *= 0x1000193 + } + if !iter.loadMore() { + iter.ReportError("readFieldHash", `incomplete field name`) + return 0 + } + } +} + +func calcHash(str string, caseSensitive bool) int64 { + if !caseSensitive { + str = strings.ToLower(str) + } + hash := int64(0x811c9dc5) + for _, b := range []byte(str) { + hash ^= int64(b) + hash *= 0x1000193 + } + return int64(hash) +} + +// ReadObjectCB read object with callback, the key is ascii only and field name not copied +func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + var field string + if c == '{' { + if !iter.incrementDepth() { + return false + } + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + 
iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + for c == ',' { + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadObjectCB", `object not ended with }`) + iter.decrementDepth() + return false + } + return iter.decrementDepth() + } + if c == '}' { + return iter.decrementDepth() + } + iter.ReportError("ReadObjectCB", `expect " after {, but found `+string([]byte{c})) + iter.decrementDepth() + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c})) + return false +} + +// ReadMapCB read map with callback, the key can be any string +func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + if c == '{' { + if !iter.incrementDepth() { + return false + } + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field := iter.ReadString() + if iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + iter.decrementDepth() + return false + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + for c == ',' { + field = iter.ReadString() + if iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + iter.decrementDepth() + return false + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadMapCB", `object not ended with }`) + iter.decrementDepth() + return false + } + return iter.decrementDepth() + } + if c == '}' { + return iter.decrementDepth() + } + iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c})) + iter.decrementDepth() + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) + return false +} + +func (iter *Iterator) readObjectStart() bool { + c := iter.nextToken() + if c == '{' { + c = iter.nextToken() + if c == '}' { + return false + } + iter.unreadByte() + return true + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return false + } + iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c})) + return false +} + +func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) { + str := iter.ReadStringAsSlice() + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if iter.buf[iter.head] != ':' { + iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]})) + return + } + iter.head++ + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if ret == nil { + return str + } + return ret +} diff --git a/vendor/github.com/json-iterator/go/iter_skip.go b/vendor/github.com/json-iterator/go/iter_skip.go new file mode 100644 index 
000000000..e91eefb15 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_skip.go @@ -0,0 +1,130 @@ +package jsoniter + +import "fmt" + +// ReadNil reads a json object as nil and +// returns whether it's a nil or not +func (iter *Iterator) ReadNil() (ret bool) { + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') // null + return true + } + iter.unreadByte() + return false +} + +// ReadBool reads a json object as BoolValue +func (iter *Iterator) ReadBool() (ret bool) { + c := iter.nextToken() + if c == 't' { + iter.skipThreeBytes('r', 'u', 'e') + return true + } + if c == 'f' { + iter.skipFourBytes('a', 'l', 's', 'e') + return false + } + iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c})) + return +} + +// SkipAndReturnBytes skip next JSON element, and return its content as []byte. +// The []byte can be kept, it is a copy of data. +func (iter *Iterator) SkipAndReturnBytes() []byte { + iter.startCapture(iter.head) + iter.Skip() + return iter.stopCapture() +} + +// SkipAndAppendBytes skips next JSON element and appends its content to +// buffer, returning the result. +func (iter *Iterator) SkipAndAppendBytes(buf []byte) []byte { + iter.startCaptureTo(buf, iter.head) + iter.Skip() + return iter.stopCapture() +} + +func (iter *Iterator) startCaptureTo(buf []byte, captureStartedAt int) { + if iter.captured != nil { + panic("already in capture mode") + } + iter.captureStartedAt = captureStartedAt + iter.captured = buf +} + +func (iter *Iterator) startCapture(captureStartedAt int) { + iter.startCaptureTo(make([]byte, 0, 32), captureStartedAt) +} + +func (iter *Iterator) stopCapture() []byte { + if iter.captured == nil { + panic("not in capture mode") + } + captured := iter.captured + remaining := iter.buf[iter.captureStartedAt:iter.head] + iter.captureStartedAt = -1 + iter.captured = nil + return append(captured, remaining...) 
+} + +// Skip skips a json object and positions to relatively the next json object +func (iter *Iterator) Skip() { + c := iter.nextToken() + switch c { + case '"': + iter.skipString() + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + case '0': + iter.unreadByte() + iter.ReadFloat32() + case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.skipNumber() + case '[': + iter.skipArray() + case '{': + iter.skipObject() + default: + iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c)) + return + } +} + +func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b4 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } +} + +func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } +} diff --git a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go new file mode 100644 index 000000000..9303de41e --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go @@ -0,0 +1,163 @@ +//+build jsoniter_sloppy + +package jsoniter + +// sloppy but faster implementation, do not validate the input json + +func (iter *Iterator) skipNumber() { + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\r', '\t', ',', '}', ']': + iter.head = i + return + } + } + if !iter.loadMore() { + return + } + } +} + +func (iter *Iterator) skipArray() { + level := 1 + if !iter.incrementDepth() { + return + } + for { + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '[': // If open symbol, increase level + level++ + if !iter.incrementDepth() { + return + } + case ']': // If close symbol, increase level + level-- + if !iter.decrementDepth() { + return + } + + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipObject", "incomplete array") + return + } + } +} + +func (iter *Iterator) skipObject() { + level := 1 + if !iter.incrementDepth() { + return + } + + for { + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '{': // If open symbol, increase level + level++ + if !iter.incrementDepth() { + return + } + case '}': // If close symbol, increase level + level-- + if !iter.decrementDepth() { + return + 
} + + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipObject", "incomplete object") + return + } + } +} + +func (iter *Iterator) skipString() { + for { + end, escaped := iter.findStringEnd() + if end == -1 { + if !iter.loadMore() { + iter.ReportError("skipString", "incomplete string") + return + } + if escaped { + iter.head = 1 // skip the first char as last char read is \ + } + } else { + iter.head = end + return + } + } +} + +// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go +// Tries to find the end of string +// Support if string contains escaped quote symbols. +func (iter *Iterator) findStringEnd() (int, bool) { + escaped := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + if !escaped { + return i + 1, false + } + j := i - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return i + 1, true + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + } + } else if c == '\\' { + escaped = true + } + } + j := iter.tail - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return -1, false // do not end with \ + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + + } + return -1, true // end with \ +} diff --git a/vendor/github.com/json-iterator/go/iter_skip_strict.go b/vendor/github.com/json-iterator/go/iter_skip_strict.go new file mode 100644 index 000000000..6cf66d043 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_skip_strict.go @@ -0,0 +1,99 @@ +//+build !jsoniter_sloppy + +package jsoniter + +import ( + "fmt" + "io" +) + +func (iter *Iterator) skipNumber() { + if !iter.trySkipNumber() { + iter.unreadByte() + if iter.Error != nil && iter.Error != io.EOF { + return + } + iter.ReadFloat64() + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = nil + iter.ReadBigFloat() + } + } +} + +func (iter *Iterator) trySkipNumber() bool { + dotFound := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + case '.': + if dotFound { + iter.ReportError("validateNumber", `more than one dot found in number`) + return true // already failed + } + if i+1 == iter.tail { + return false + } + c = iter.buf[i+1] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + iter.ReportError("validateNumber", `missing digit after dot`) + return true // already failed + } + dotFound = true + default: + switch c { + case ',', ']', '}', ' ', '\t', '\n', '\r': + if iter.head == i { + return false // if - without following digits + } + iter.head = i + return true // must be valid + } + return false // may be invalid + } + } + return false +} + +func (iter *Iterator) skipString() { + if !iter.trySkipString() { + iter.unreadByte() + iter.ReadString() + } +} + +func (iter *Iterator) trySkipString() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + iter.head = i + 1 + return true // valid + } else if c == '\\' { + return false + } else if c < ' ' { + iter.ReportError("trySkipString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return true // already failed + } + } + return false +} + 
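A hedged usage sketch (illustrative only, not part of the vendored file) of the skip API defined in the preceding files: ReadObject drives the field loop, Skip discards a value without decoding it, and SkipAndReturnBytes keeps a copy of one element's raw JSON. It assumes jsoniter.ConfigDefault from upstream json-iterator.

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	input := `{"keep":{"a":1},"drop":[1,2,3]}`
	it := jsoniter.ParseString(jsoniter.ConfigDefault, input)

	for field := it.ReadObject(); field != ""; field = it.ReadObject() {
		if field == "keep" {
			raw := it.SkipAndReturnBytes() // copy of the element's raw bytes
			fmt.Printf("keep = %s\n", raw) // keep = {"a":1}
		} else {
			it.Skip() // discard the value without building a Go value
		}
	}
}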
+func (iter *Iterator) skipObject() { + iter.unreadByte() + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + return true + }) +} + +func (iter *Iterator) skipArray() { + iter.unreadByte() + iter.ReadArrayCB(func(iter *Iterator) bool { + iter.Skip() + return true + }) +} diff --git a/vendor/github.com/json-iterator/go/iter_str.go b/vendor/github.com/json-iterator/go/iter_str.go new file mode 100644 index 000000000..adc487ea8 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_str.go @@ -0,0 +1,215 @@ +package jsoniter + +import ( + "fmt" + "unicode/utf16" +) + +// ReadString read string from iterator +func (iter *Iterator) ReadString() (ret string) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + ret = string(iter.buf[iter.head:i]) + iter.head = i + 1 + return ret + } else if c == '\\' { + break + } else if c < ' ' { + iter.ReportError("ReadString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return + } + } + return iter.readStringSlowPath() + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return "" + } + iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c})) + return +} + +func (iter *Iterator) readStringSlowPath() (ret string) { + var str []byte + var c byte + for iter.Error == nil { + c = iter.readByte() + if c == '"' { + return string(str) + } + if c == '\\' { + c = iter.readByte() + str = iter.readEscapedChar(c, str) + } else { + str = append(str, c) + } + } + iter.ReportError("readStringSlowPath", "unexpected end of input") + return +} + +func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte { + switch c { + case 'u': + r := iter.readU4() + if utf16.IsSurrogate(r) { + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != '\\' { + iter.unreadByte() + str = appendRune(str, r) + return str + } + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != 'u' { + str = appendRune(str, r) + return iter.readEscapedChar(c, str) + } + r2 := iter.readU4() + if iter.Error != nil { + return nil + } + combined := utf16.DecodeRune(r, r2) + if combined == '\uFFFD' { + str = appendRune(str, r) + str = appendRune(str, r2) + } else { + str = appendRune(str, combined) + } + } else { + str = appendRune(str, r) + } + case '"': + str = append(str, '"') + case '\\': + str = append(str, '\\') + case '/': + str = append(str, '/') + case 'b': + str = append(str, '\b') + case 'f': + str = append(str, '\f') + case 'n': + str = append(str, '\n') + case 'r': + str = append(str, '\r') + case 't': + str = append(str, '\t') + default: + iter.ReportError("readEscapedChar", + `invalid escape char after \`) + return nil + } + return str +} + +// ReadStringAsSlice read string from iterator without copying into string form. +// The []byte can not be kept, as it will change after next iterator call. 
+func (iter *Iterator) ReadStringAsSlice() (ret []byte) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + // for: field name, base64, number + if iter.buf[i] == '"' { + // fast path: reuse the underlying buffer + ret = iter.buf[iter.head:i] + iter.head = i + 1 + return ret + } + } + readLen := iter.tail - iter.head + copied := make([]byte, readLen, readLen*2) + copy(copied, iter.buf[iter.head:iter.tail]) + iter.head = iter.tail + for iter.Error == nil { + c := iter.readByte() + if c == '"' { + return copied + } + copied = append(copied, c) + } + return copied + } + iter.ReportError("ReadStringAsSlice", `expects " or n, but found `+string([]byte{c})) + return +} + +func (iter *Iterator) readU4() (ret rune) { + for i := 0; i < 4; i++ { + c := iter.readByte() + if iter.Error != nil { + return + } + if c >= '0' && c <= '9' { + ret = ret*16 + rune(c-'0') + } else if c >= 'a' && c <= 'f' { + ret = ret*16 + rune(c-'a'+10) + } else if c >= 'A' && c <= 'F' { + ret = ret*16 + rune(c-'A'+10) + } else { + iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c})) + return + } + } + return ret +} + +const ( + t1 = 0x00 // 0000 0000 + tx = 0x80 // 1000 0000 + t2 = 0xC0 // 1100 0000 + t3 = 0xE0 // 1110 0000 + t4 = 0xF0 // 1111 0000 + t5 = 0xF8 // 1111 1000 + + maskx = 0x3F // 0011 1111 + mask2 = 0x1F // 0001 1111 + mask3 = 0x0F // 0000 1111 + mask4 = 0x07 // 0000 0111 + + rune1Max = 1<<7 - 1 + rune2Max = 1<<11 - 1 + rune3Max = 1<<16 - 1 + + surrogateMin = 0xD800 + surrogateMax = 0xDFFF + + maxRune = '\U0010FFFF' // Maximum valid Unicode code point. + runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character" +) + +func appendRune(p []byte, r rune) []byte { + // Negative values are erroneous. Making it unsigned addresses the problem. + switch i := uint32(r); { + case i <= rune1Max: + p = append(p, byte(r)) + return p + case i <= rune2Max: + p = append(p, t2|byte(r>>6)) + p = append(p, tx|byte(r)&maskx) + return p + case i > maxRune, surrogateMin <= i && i <= surrogateMax: + r = runeError + fallthrough + case i <= rune3Max: + p = append(p, t3|byte(r>>12)) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + default: + p = append(p, t4|byte(r>>18)) + p = append(p, tx|byte(r>>12)&maskx) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + } +} diff --git a/vendor/github.com/json-iterator/go/jsoniter.go b/vendor/github.com/json-iterator/go/jsoniter.go new file mode 100644 index 000000000..c2934f916 --- /dev/null +++ b/vendor/github.com/json-iterator/go/jsoniter.go @@ -0,0 +1,18 @@ +// Package jsoniter implements encoding and decoding of JSON as defined in +// RFC 4627 and provides interfaces with identical syntax of standard lib encoding/json. +// Converting from encoding/json to jsoniter is no more than replacing the package with jsoniter +// and variable type declarations (if any). +// jsoniter interfaces gives 100% compatibility with code using standard lib. +// +// "JSON and Go" +// (https://golang.org/doc/articles/json_and_go.html) +// gives a description of how Marshal/Unmarshal operate +// between arbitrary or predefined json objects and bytes, +// and it applies to jsoniter.Marshal/Unmarshal as well. +// +// Besides, jsoniter.Iterator provides a different set of interfaces +// iterating given bytes/string/reader +// and yielding parsed elements one by one. 
+// This set of interfaces reads input as required and gives +// better performance. +package jsoniter diff --git a/vendor/github.com/json-iterator/go/pool.go b/vendor/github.com/json-iterator/go/pool.go new file mode 100644 index 000000000..e2389b56c --- /dev/null +++ b/vendor/github.com/json-iterator/go/pool.go @@ -0,0 +1,42 @@ +package jsoniter + +import ( + "io" +) + +// IteratorPool a thread safe pool of iterators with same configuration +type IteratorPool interface { + BorrowIterator(data []byte) *Iterator + ReturnIterator(iter *Iterator) +} + +// StreamPool a thread safe pool of streams with same configuration +type StreamPool interface { + BorrowStream(writer io.Writer) *Stream + ReturnStream(stream *Stream) +} + +func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream { + stream := cfg.streamPool.Get().(*Stream) + stream.Reset(writer) + return stream +} + +func (cfg *frozenConfig) ReturnStream(stream *Stream) { + stream.out = nil + stream.Error = nil + stream.Attachment = nil + cfg.streamPool.Put(stream) +} + +func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator { + iter := cfg.iteratorPool.Get().(*Iterator) + iter.ResetBytes(data) + return iter +} + +func (cfg *frozenConfig) ReturnIterator(iter *Iterator) { + iter.Error = nil + iter.Attachment = nil + cfg.iteratorPool.Put(iter) +} diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go new file mode 100644 index 000000000..74974ba74 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect.go @@ -0,0 +1,337 @@ +package jsoniter + +import ( + "fmt" + "reflect" + "unsafe" + + "github.com/modern-go/reflect2" +) + +// ValDecoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValDecoder with json.Decoder. +// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link). +// +// Reflection on type to create decoders, which is then cached +// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions +// 1. create instance of new value, for example *int will need a int to be allocated +// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New +// 3. assignment to map, both key and value will be reflect.Value +// For a simple struct binding, it will be reflect.Value free and allocation free +type ValDecoder interface { + Decode(ptr unsafe.Pointer, iter *Iterator) +} + +// ValEncoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValEncoder with json.Encoder. +// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link). 
+type ValEncoder interface { + IsEmpty(ptr unsafe.Pointer) bool + Encode(ptr unsafe.Pointer, stream *Stream) +} + +type checkIsEmpty interface { + IsEmpty(ptr unsafe.Pointer) bool +} + +type ctx struct { + *frozenConfig + prefix string + encoders map[reflect2.Type]ValEncoder + decoders map[reflect2.Type]ValDecoder +} + +func (b *ctx) caseSensitive() bool { + if b.frozenConfig == nil { + // default is case-insensitive + return false + } + return b.frozenConfig.caseSensitive +} + +func (b *ctx) append(prefix string) *ctx { + return &ctx{ + frozenConfig: b.frozenConfig, + prefix: b.prefix + " " + prefix, + encoders: b.encoders, + decoders: b.decoders, + } +} + +// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal +func (iter *Iterator) ReadVal(obj interface{}) { + depth := iter.depth + cacheKey := reflect2.RTypeOf(obj) + decoder := iter.cfg.getDecoderFromCache(cacheKey) + if decoder == nil { + typ := reflect2.TypeOf(obj) + if typ.Kind() != reflect.Ptr { + iter.ReportError("ReadVal", "can only unmarshal into pointer") + return + } + decoder = iter.cfg.DecoderOf(typ) + } + ptr := reflect2.PtrOf(obj) + if ptr == nil { + iter.ReportError("ReadVal", "can not read into nil pointer") + return + } + decoder.Decode(ptr, iter) + if iter.depth != depth { + iter.ReportError("ReadVal", "unexpected mismatched nesting") + return + } +} + +// WriteVal copy the go interface into underlying JSON, same as json.Marshal +func (stream *Stream) WriteVal(val interface{}) { + if nil == val { + stream.WriteNil() + return + } + cacheKey := reflect2.RTypeOf(val) + encoder := stream.cfg.getEncoderFromCache(cacheKey) + if encoder == nil { + typ := reflect2.TypeOf(val) + encoder = stream.cfg.EncoderOf(typ) + } + encoder.Encode(reflect2.PtrOf(val), stream) +} + +func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder { + cacheKey := typ.RType() + decoder := cfg.getDecoderFromCache(cacheKey) + if decoder != nil { + return decoder + } + ctx := &ctx{ + frozenConfig: cfg, + prefix: "", + decoders: map[reflect2.Type]ValDecoder{}, + encoders: map[reflect2.Type]ValEncoder{}, + } + ptrType := typ.(*reflect2.UnsafePtrType) + decoder = decoderOfType(ctx, ptrType.Elem()) + cfg.addDecoderToCache(cacheKey, decoder) + return decoder +} + +func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := getTypeDecoderFromExtension(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfType(ctx, typ) + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) + for _, extension := range ctx.extraExtensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + return decoder +} + +func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := ctx.decoders[typ] + if decoder != nil { + return decoder + } + placeholder := &placeholderDecoder{} + ctx.decoders[typ] = placeholder + decoder = _createDecoderOfType(ctx, typ) + placeholder.decoder = decoder + return decoder +} + +func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := createDecoderOfJsonRawMessage(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfJsonNumber(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfMarshaler(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfAny(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfNative(ctx, typ) + if decoder != nil 
{ + return decoder + } + switch typ.Kind() { + case reflect.Interface: + ifaceType, isIFace := typ.(*reflect2.UnsafeIFaceType) + if isIFace { + return &ifaceDecoder{valType: ifaceType} + } + return &efaceDecoder{} + case reflect.Struct: + return decoderOfStruct(ctx, typ) + case reflect.Array: + return decoderOfArray(ctx, typ) + case reflect.Slice: + return decoderOfSlice(ctx, typ) + case reflect.Map: + return decoderOfMap(ctx, typ) + case reflect.Ptr: + return decoderOfOptional(ctx, typ) + default: + return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} + } +} + +func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder { + cacheKey := typ.RType() + encoder := cfg.getEncoderFromCache(cacheKey) + if encoder != nil { + return encoder + } + ctx := &ctx{ + frozenConfig: cfg, + prefix: "", + decoders: map[reflect2.Type]ValDecoder{}, + encoders: map[reflect2.Type]ValEncoder{}, + } + encoder = encoderOfType(ctx, typ) + if typ.LikePtr() { + encoder = &onePtrEncoder{encoder} + } + cfg.addEncoderToCache(cacheKey, encoder) + return encoder +} + +type onePtrEncoder struct { + encoder ValEncoder +} + +func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) +} + +func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) +} + +func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := getTypeEncoderFromExtension(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfType(ctx, typ) + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) + for _, extension := range ctx.extraExtensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + return encoder +} + +func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := ctx.encoders[typ] + if encoder != nil { + return encoder + } + placeholder := &placeholderEncoder{} + ctx.encoders[typ] = placeholder + encoder = _createEncoderOfType(ctx, typ) + placeholder.encoder = encoder + return encoder +} +func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := createEncoderOfJsonRawMessage(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfJsonNumber(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfMarshaler(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfAny(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfNative(ctx, typ) + if encoder != nil { + return encoder + } + kind := typ.Kind() + switch kind { + case reflect.Interface: + return &dynamicEncoder{typ} + case reflect.Struct: + return encoderOfStruct(ctx, typ) + case reflect.Array: + return encoderOfArray(ctx, typ) + case reflect.Slice: + return encoderOfSlice(ctx, typ) + case reflect.Map: + return encoderOfMap(ctx, typ) + case reflect.Ptr: + return encoderOfOptional(ctx, typ) + default: + return &lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} + } +} + +type lazyErrorDecoder struct { + err error +} + +func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() != NilValue { + if iter.Error == nil { + iter.Error = decoder.err + } + } else { + iter.Skip() + } +} + +type lazyErrorEncoder struct { + err error +} + +func (encoder 
*lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if ptr == nil { + stream.WriteNil() + } else if stream.Error == nil { + stream.Error = encoder.err + } +} + +func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type placeholderDecoder struct { + decoder ValDecoder +} + +func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.decoder.Decode(ptr, iter) +} + +type placeholderEncoder struct { + encoder ValEncoder +} + +func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(ptr, stream) +} + +func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(ptr) +} diff --git a/vendor/github.com/json-iterator/go/reflect_array.go b/vendor/github.com/json-iterator/go/reflect_array.go new file mode 100644 index 000000000..13a0b7b08 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_array.go @@ -0,0 +1,104 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "unsafe" +) + +func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder { + arrayType := typ.(*reflect2.UnsafeArrayType) + decoder := decoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) + return &arrayDecoder{arrayType, decoder} +} + +func encoderOfArray(ctx *ctx, typ reflect2.Type) ValEncoder { + arrayType := typ.(*reflect2.UnsafeArrayType) + if arrayType.Len() == 0 { + return emptyArrayEncoder{} + } + encoder := encoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) + return &arrayEncoder{arrayType, encoder} +} + +type emptyArrayEncoder struct{} + +func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyArray() +} + +func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return true +} + +type arrayEncoder struct { + arrayType *reflect2.UnsafeArrayType + elemEncoder ValEncoder +} + +func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteArrayStart() + elemPtr := unsafe.Pointer(ptr) + encoder.elemEncoder.Encode(elemPtr, stream) + for i := 1; i < encoder.arrayType.Len(); i++ { + stream.WriteMore() + elemPtr = encoder.arrayType.UnsafeGetIndex(ptr, i) + encoder.elemEncoder.Encode(elemPtr, stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error()) + } +} + +func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type arrayDecoder struct { + arrayType *reflect2.UnsafeArrayType + elemDecoder ValDecoder +} + +func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error()) + } +} + +func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + arrayType := decoder.arrayType + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return + } + if c != '[' { + iter.ReportError("decode array", "expect [ or n, but found "+string([]byte{c})) + return + } + c = iter.nextToken() + if c == ']' { + return + } + iter.unreadByte() + elemPtr := arrayType.UnsafeGetIndex(ptr, 0) + decoder.elemDecoder.Decode(elemPtr, iter) + length := 1 + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + if length >= arrayType.Len() { + iter.Skip() + continue + } + idx := length + length += 1 + elemPtr = 
arrayType.UnsafeGetIndex(ptr, idx) + decoder.elemDecoder.Decode(elemPtr, iter) + } + if c != ']' { + iter.ReportError("decode array", "expect ], but found "+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_dynamic.go b/vendor/github.com/json-iterator/go/reflect_dynamic.go new file mode 100644 index 000000000..8b6bc8b43 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_dynamic.go @@ -0,0 +1,70 @@ +package jsoniter + +import ( + "github.com/modern-go/reflect2" + "reflect" + "unsafe" +) + +type dynamicEncoder struct { + valType reflect2.Type +} + +func (encoder *dynamicEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + stream.WriteVal(obj) +} + +func (encoder *dynamicEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.valType.UnsafeIndirect(ptr) == nil +} + +type efaceDecoder struct { +} + +func (decoder *efaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + pObj := (*interface{})(ptr) + obj := *pObj + if obj == nil { + *pObj = iter.Read() + return + } + typ := reflect2.TypeOf(obj) + if typ.Kind() != reflect.Ptr { + *pObj = iter.Read() + return + } + ptrType := typ.(*reflect2.UnsafePtrType) + ptrElemType := ptrType.Elem() + if iter.WhatIsNext() == NilValue { + if ptrElemType.Kind() != reflect.Ptr { + iter.skipFourBytes('n', 'u', 'l', 'l') + *pObj = nil + return + } + } + if reflect2.IsNil(obj) { + obj := ptrElemType.New() + iter.ReadVal(obj) + *pObj = obj + return + } + iter.ReadVal(obj) +} + +type ifaceDecoder struct { + valType *reflect2.UnsafeIFaceType +} + +func (decoder *ifaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + decoder.valType.UnsafeSet(ptr, decoder.valType.UnsafeNew()) + return + } + obj := decoder.valType.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + iter.ReportError("decode non empty interface", "can not unmarshal into nil") + return + } + iter.ReadVal(obj) +} diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go new file mode 100644 index 000000000..74a97bfe5 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_extension.go @@ -0,0 +1,483 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "reflect" + "sort" + "strings" + "unicode" + "unsafe" +) + +var typeDecoders = map[string]ValDecoder{} +var fieldDecoders = map[string]ValDecoder{} +var typeEncoders = map[string]ValEncoder{} +var fieldEncoders = map[string]ValEncoder{} +var extensions = []Extension{} + +// StructDescriptor describe how should we encode/decode the struct +type StructDescriptor struct { + Type reflect2.Type + Fields []*Binding +} + +// GetField get one field from the descriptor by its name. +// Can not use map here to keep field orders. +func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding { + for _, binding := range structDescriptor.Fields { + if binding.Field.Name() == fieldName { + return binding + } + } + return nil +} + +// Binding describe how should we encode/decode the struct field +type Binding struct { + levels []int + Field reflect2.StructField + FromNames []string + ToNames []string + Encoder ValEncoder + Decoder ValDecoder +} + +// Extension the one for all SPI. Customize encoding/decoding by specifying alternate encoder/decoder. +// Can also rename fields by UpdateStructDescriptor. 
+type Extension interface { + UpdateStructDescriptor(structDescriptor *StructDescriptor) + CreateMapKeyDecoder(typ reflect2.Type) ValDecoder + CreateMapKeyEncoder(typ reflect2.Type) ValEncoder + CreateDecoder(typ reflect2.Type) ValDecoder + CreateEncoder(typ reflect2.Type) ValEncoder + DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder + DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder +} + +// DummyExtension embed this type get dummy implementation for all methods of Extension +type DummyExtension struct { +} + +// UpdateStructDescriptor No-op +func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateMapKeyDecoder No-op +func (extension *DummyExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension *DummyExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// CreateDecoder No-op +func (extension *DummyExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateEncoder No-op +func (extension *DummyExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension *DummyExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension *DummyExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type EncoderExtension map[reflect2.Type]ValEncoder + +// UpdateStructDescriptor No-op +func (extension EncoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateDecoder No-op +func (extension EncoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateEncoder get encoder from map +func (extension EncoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return extension[typ] +} + +// CreateMapKeyDecoder No-op +func (extension EncoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension EncoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension EncoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension EncoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type DecoderExtension map[reflect2.Type]ValDecoder + +// UpdateStructDescriptor No-op +func (extension DecoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateMapKeyDecoder No-op +func (extension DecoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension DecoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// CreateDecoder get decoder from map +func (extension DecoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return extension[typ] +} + +// CreateEncoder No-op +func (extension DecoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension DecoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension DecoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder 
+} + +type funcDecoder struct { + fun DecoderFunc +} + +func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.fun(ptr, iter) +} + +type funcEncoder struct { + fun EncoderFunc + isEmptyFunc func(ptr unsafe.Pointer) bool +} + +func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.fun(ptr, stream) +} + +func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool { + if encoder.isEmptyFunc == nil { + return false + } + return encoder.isEmptyFunc(ptr) +} + +// DecoderFunc the function form of TypeDecoder +type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator) + +// EncoderFunc the function form of TypeEncoder +type EncoderFunc func(ptr unsafe.Pointer, stream *Stream) + +// RegisterTypeDecoderFunc register TypeDecoder for a type with function +func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) { + typeDecoders[typ] = &funcDecoder{fun} +} + +// RegisterTypeDecoder register TypeDecoder for a typ +func RegisterTypeDecoder(typ string, decoder ValDecoder) { + typeDecoders[typ] = decoder +} + +// RegisterFieldDecoderFunc register TypeDecoder for a struct field with function +func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) { + RegisterFieldDecoder(typ, field, &funcDecoder{fun}) +} + +// RegisterFieldDecoder register TypeDecoder for a struct field +func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) { + fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder +} + +// RegisterTypeEncoderFunc register TypeEncoder for a type with encode/isEmpty function +func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc} +} + +// RegisterTypeEncoder register TypeEncoder for a type +func RegisterTypeEncoder(typ string, encoder ValEncoder) { + typeEncoders[typ] = encoder +} + +// RegisterFieldEncoderFunc register TypeEncoder for a struct field with encode/isEmpty function +func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc}) +} + +// RegisterFieldEncoder register TypeEncoder for a struct field +func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) { + fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder +} + +// RegisterExtension register extension +func RegisterExtension(extension Extension) { + extensions = append(extensions, extension) +} + +func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := _getTypeDecoderFromExtension(ctx, typ) + if decoder != nil { + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) + for _, extension := range ctx.extraExtensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + } + return decoder +} +func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { + for _, extension := range extensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + decoder := ctx.decoderExtension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + for _, extension := range ctx.extraExtensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + typeName := typ.String() + decoder = typeDecoders[typeName] + if decoder != nil { + return decoder + } + if typ.Kind() == reflect.Ptr { + ptrType := 
typ.(*reflect2.UnsafePtrType) + decoder := typeDecoders[ptrType.Elem().String()] + if decoder != nil { + return &OptionalDecoder{ptrType.Elem(), decoder} + } + } + return nil +} + +func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := _getTypeEncoderFromExtension(ctx, typ) + if encoder != nil { + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) + for _, extension := range ctx.extraExtensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + } + return encoder +} + +func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { + for _, extension := range extensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + encoder := ctx.encoderExtension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + for _, extension := range ctx.extraExtensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + typeName := typ.String() + encoder = typeEncoders[typeName] + if encoder != nil { + return encoder + } + if typ.Kind() == reflect.Ptr { + typePtr := typ.(*reflect2.UnsafePtrType) + encoder := typeEncoders[typePtr.Elem().String()] + if encoder != nil { + return &OptionalEncoder{encoder} + } + } + return nil +} + +func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor { + structType := typ.(*reflect2.UnsafeStructType) + embeddedBindings := []*Binding{} + bindings := []*Binding{} + for i := 0; i < structType.NumField(); i++ { + field := structType.Field(i) + tag, hastag := field.Tag().Lookup(ctx.getTagKey()) + if ctx.onlyTaggedField && !hastag && !field.Anonymous() { + continue + } + if tag == "-" || field.Name() == "_" { + continue + } + tagParts := strings.Split(tag, ",") + if field.Anonymous() && (tag == "" || tagParts[0] == "") { + if field.Type().Kind() == reflect.Struct { + structDescriptor := describeStruct(ctx, field.Type()) + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) + omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} + binding.Decoder = &structFieldDecoder{field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } else if field.Type().Kind() == reflect.Ptr { + ptrType := field.Type().(*reflect2.UnsafePtrType) + if ptrType.Elem().Kind() == reflect.Struct { + structDescriptor := describeStruct(ctx, ptrType.Elem()) + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) 
+ omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &dereferenceEncoder{binding.Encoder} + binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} + binding.Decoder = &dereferenceDecoder{ptrType.Elem(), binding.Decoder} + binding.Decoder = &structFieldDecoder{field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } + } + } + fieldNames := calcFieldNames(field.Name(), tagParts[0], tag) + fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name()) + decoder := fieldDecoders[fieldCacheKey] + if decoder == nil { + decoder = decoderOfType(ctx.append(field.Name()), field.Type()) + } + encoder := fieldEncoders[fieldCacheKey] + if encoder == nil { + encoder = encoderOfType(ctx.append(field.Name()), field.Type()) + } + binding := &Binding{ + Field: field, + FromNames: fieldNames, + ToNames: fieldNames, + Decoder: decoder, + Encoder: encoder, + } + binding.levels = []int{i} + bindings = append(bindings, binding) + } + return createStructDescriptor(ctx, typ, bindings, embeddedBindings) +} +func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor { + structDescriptor := &StructDescriptor{ + Type: typ, + Fields: bindings, + } + for _, extension := range extensions { + extension.UpdateStructDescriptor(structDescriptor) + } + ctx.encoderExtension.UpdateStructDescriptor(structDescriptor) + ctx.decoderExtension.UpdateStructDescriptor(structDescriptor) + for _, extension := range ctx.extraExtensions { + extension.UpdateStructDescriptor(structDescriptor) + } + processTags(structDescriptor, ctx.frozenConfig) + // merge normal & embedded bindings & sort with original order + allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...)) + sort.Sort(allBindings) + structDescriptor.Fields = allBindings + return structDescriptor +} + +type sortableBindings []*Binding + +func (bindings sortableBindings) Len() int { + return len(bindings) +} + +func (bindings sortableBindings) Less(i, j int) bool { + left := bindings[i].levels + right := bindings[j].levels + k := 0 + for { + if left[k] < right[k] { + return true + } else if left[k] > right[k] { + return false + } + k++ + } +} + +func (bindings sortableBindings) Swap(i, j int) { + bindings[i], bindings[j] = bindings[j], bindings[i] +} + +func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) { + for _, binding := range structDescriptor.Fields { + shouldOmitEmpty := false + tagParts := strings.Split(binding.Field.Tag().Get(cfg.getTagKey()), ",") + for _, tagPart := range tagParts[1:] { + if tagPart == "omitempty" { + shouldOmitEmpty = true + } else if tagPart == "string" { + if binding.Field.Type().Kind() == reflect.String { + binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg} + binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg} + } else { + binding.Decoder = &stringModeNumberDecoder{binding.Decoder} + binding.Encoder = &stringModeNumberEncoder{binding.Encoder} + } + } + } + binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder} + binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty} + } +} + +func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string { + // ignore? + if wholeTag == "-" { + return []string{} + } + // rename? 
+ var fieldNames []string + if tagProvidedFieldName == "" { + fieldNames = []string{originalFieldName} + } else { + fieldNames = []string{tagProvidedFieldName} + } + // private? + isNotExported := unicode.IsLower(rune(originalFieldName[0])) || originalFieldName[0] == '_' + if isNotExported { + fieldNames = []string{} + } + return fieldNames +} diff --git a/vendor/github.com/json-iterator/go/reflect_json_number.go b/vendor/github.com/json-iterator/go/reflect_json_number.go new file mode 100644 index 000000000..98d45c1ec --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_json_number.go @@ -0,0 +1,112 @@ +package jsoniter + +import ( + "encoding/json" + "github.com/modern-go/reflect2" + "strconv" + "unsafe" +) + +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +func CastJsonNumber(val interface{}) (string, bool) { + switch typedVal := val.(type) { + case json.Number: + return string(typedVal), true + case Number: + return string(typedVal), true + } + return "", false +} + +var jsonNumberType = reflect2.TypeOfPtr((*json.Number)(nil)).Elem() +var jsoniterNumberType = reflect2.TypeOfPtr((*Number)(nil)).Elem() + +func createDecoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + return nil +} + +func createEncoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + return nil +} + +type jsonNumberCodec struct { +} + +func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*json.Number)(ptr)) = json.Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*json.Number)(ptr)) = "" + default: + *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + number := *((*json.Number)(ptr)) + if len(number) == 0 { + stream.writeByte('0') + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.Number)(ptr))) == 0 +} + +type jsoniterNumberCodec struct { +} + +func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*Number)(ptr)) = Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*Number)(ptr)) = "" + default: + *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + number := *((*Number)(ptr)) + if len(number) == 0 { + stream.writeByte('0') + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*Number)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go new 
file mode 100644 index 000000000..f2619936c --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go @@ -0,0 +1,60 @@ +package jsoniter + +import ( + "encoding/json" + "github.com/modern-go/reflect2" + "unsafe" +) + +var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem() +var jsoniterRawMessageType = reflect2.TypeOfPtr((*RawMessage)(nil)).Elem() + +func createEncoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + return nil +} + +func createDecoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + return nil +} + +type jsonRawMessageCodec struct { +} + +func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes()) +} + +func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) +} + +func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 +} + +type jsoniterRawMessageCodec struct { +} + +func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes()) +} + +func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*RawMessage)(ptr)))) +} + +func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*RawMessage)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go new file mode 100644 index 000000000..582967130 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_map.go @@ -0,0 +1,346 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "sort" + "unsafe" +) + +func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder { + mapType := typ.(*reflect2.UnsafeMapType) + keyDecoder := decoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()) + elemDecoder := decoderOfType(ctx.append("[mapElem]"), mapType.Elem()) + return &mapDecoder{ + mapType: mapType, + keyType: mapType.Key(), + elemType: mapType.Elem(), + keyDecoder: keyDecoder, + elemDecoder: elemDecoder, + } +} + +func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder { + mapType := typ.(*reflect2.UnsafeMapType) + if ctx.sortMapKeys { + return &sortKeysMapEncoder{ + mapType: mapType, + keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), + elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), + } + } + return &mapEncoder{ + mapType: mapType, + keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), + elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), + } +} + +func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := ctx.decoderExtension.CreateMapKeyDecoder(typ) + if decoder != nil { + return decoder + } + for _, extension := range ctx.extraExtensions { + decoder := extension.CreateMapKeyDecoder(typ) + if decoder != nil { + return decoder + } + } + + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(unmarshalerType) { + return &referenceDecoder{ + &unmarshalerDecoder{ + valType: ptrType, + }, + } + 
} + if typ.Implements(unmarshalerType) { + return &unmarshalerDecoder{ + valType: typ, + } + } + if ptrType.Implements(textUnmarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(textUnmarshalerType) { + return &textUnmarshalerDecoder{ + valType: typ, + } + } + + switch typ.Kind() { + case reflect.String: + return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) + case reflect.Bool, + reflect.Uint8, reflect.Int8, + reflect.Uint16, reflect.Int16, + reflect.Uint32, reflect.Int32, + reflect.Uint64, reflect.Int64, + reflect.Uint, reflect.Int, + reflect.Float32, reflect.Float64, + reflect.Uintptr: + typ = reflect2.DefaultTypeOfKind(typ.Kind()) + return &numericMapKeyDecoder{decoderOfType(ctx, typ)} + default: + return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)} + } +} + +func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := ctx.encoderExtension.CreateMapKeyEncoder(typ) + if encoder != nil { + return encoder + } + for _, extension := range ctx.extraExtensions { + encoder := extension.CreateMapKeyEncoder(typ) + if encoder != nil { + return encoder + } + } + + if typ == textMarshalerType { + return &directTextMarshalerEncoder{ + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + if typ.Implements(textMarshalerType) { + return &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + + switch typ.Kind() { + case reflect.String: + return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) + case reflect.Bool, + reflect.Uint8, reflect.Int8, + reflect.Uint16, reflect.Int16, + reflect.Uint32, reflect.Int32, + reflect.Uint64, reflect.Int64, + reflect.Uint, reflect.Int, + reflect.Float32, reflect.Float64, + reflect.Uintptr: + typ = reflect2.DefaultTypeOfKind(typ.Kind()) + return &numericMapKeyEncoder{encoderOfType(ctx, typ)} + default: + if typ.Kind() == reflect.Interface { + return &dynamicMapKeyEncoder{ctx, typ} + } + return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)} + } +} + +type mapDecoder struct { + mapType *reflect2.UnsafeMapType + keyType reflect2.Type + elemType reflect2.Type + keyDecoder ValDecoder + elemDecoder ValDecoder +} + +func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + mapType := decoder.mapType + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + *(*unsafe.Pointer)(ptr) = nil + mapType.UnsafeSet(ptr, mapType.UnsafeNew()) + return + } + if mapType.UnsafeIsNil(ptr) { + mapType.UnsafeSet(ptr, mapType.UnsafeMakeMap(0)) + } + if c != '{' { + iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) + return + } + c = iter.nextToken() + if c == '}' { + return + } + iter.unreadByte() + key := decoder.keyType.UnsafeNew() + decoder.keyDecoder.Decode(key, iter) + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return + } + elem := decoder.elemType.UnsafeNew() + decoder.elemDecoder.Decode(elem, iter) + decoder.mapType.UnsafeSetIndex(ptr, key, elem) + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + key := decoder.keyType.UnsafeNew() + decoder.keyDecoder.Decode(key, iter) + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return + } + elem := decoder.elemType.UnsafeNew() + decoder.elemDecoder.Decode(elem, iter) + 
decoder.mapType.UnsafeSetIndex(ptr, key, elem) + } + if c != '}' { + iter.ReportError("ReadMapCB", `expect }, but found `+string([]byte{c})) + } +} + +type numericMapKeyDecoder struct { + decoder ValDecoder +} + +func (decoder *numericMapKeyDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + if c != '"' { + iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) + return + } + decoder.decoder.Decode(ptr, iter) + c = iter.nextToken() + if c != '"' { + iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) + return + } +} + +type numericMapKeyEncoder struct { + encoder ValEncoder +} + +func (encoder *numericMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.encoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type dynamicMapKeyEncoder struct { + ctx *ctx + valType reflect2.Type +} + +func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream) +} + +func (encoder *dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { + obj := encoder.valType.UnsafeIndirect(ptr) + return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj)) +} + +type mapEncoder struct { + mapType *reflect2.UnsafeMapType + keyEncoder ValEncoder + elemEncoder ValEncoder +} + +func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *(*unsafe.Pointer)(ptr) == nil { + stream.WriteNil() + return + } + stream.WriteObjectStart() + iter := encoder.mapType.UnsafeIterate(ptr) + for i := 0; iter.HasNext(); i++ { + if i != 0 { + stream.WriteMore() + } + key, elem := iter.UnsafeNext() + encoder.keyEncoder.Encode(key, stream) + if stream.indention > 0 { + stream.writeTwoBytes(byte(':'), byte(' ')) + } else { + stream.writeByte(':') + } + encoder.elemEncoder.Encode(elem, stream) + } + stream.WriteObjectEnd() +} + +func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + iter := encoder.mapType.UnsafeIterate(ptr) + return !iter.HasNext() +} + +type sortKeysMapEncoder struct { + mapType *reflect2.UnsafeMapType + keyEncoder ValEncoder + elemEncoder ValEncoder +} + +func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *(*unsafe.Pointer)(ptr) == nil { + stream.WriteNil() + return + } + stream.WriteObjectStart() + mapIter := encoder.mapType.UnsafeIterate(ptr) + subStream := stream.cfg.BorrowStream(nil) + subStream.Attachment = stream.Attachment + subIter := stream.cfg.BorrowIterator(nil) + keyValues := encodedKeyValues{} + for mapIter.HasNext() { + key, elem := mapIter.UnsafeNext() + subStreamIndex := subStream.Buffered() + encoder.keyEncoder.Encode(key, subStream) + if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil { + stream.Error = subStream.Error + } + encodedKey := subStream.Buffer()[subStreamIndex:] + subIter.ResetBytes(encodedKey) + decodedKey := subIter.ReadString() + if stream.indention > 0 { + subStream.writeTwoBytes(byte(':'), byte(' ')) + } else { + subStream.writeByte(':') + } + encoder.elemEncoder.Encode(elem, subStream) + keyValues = append(keyValues, encodedKV{ + key: decodedKey, + keyValue: subStream.Buffer()[subStreamIndex:], + }) + } + sort.Sort(keyValues) + for i, keyValue := range keyValues { + if i != 0 { + stream.WriteMore() + } + 
stream.Write(keyValue.keyValue) + } + if subStream.Error != nil && stream.Error == nil { + stream.Error = subStream.Error + } + stream.WriteObjectEnd() + stream.cfg.ReturnStream(subStream) + stream.cfg.ReturnIterator(subIter) +} + +func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + iter := encoder.mapType.UnsafeIterate(ptr) + return !iter.HasNext() +} + +type encodedKeyValues []encodedKV + +type encodedKV struct { + key string + keyValue []byte +} + +func (sv encodedKeyValues) Len() int { return len(sv) } +func (sv encodedKeyValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv encodedKeyValues) Less(i, j int) bool { return sv[i].key < sv[j].key } diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go new file mode 100644 index 000000000..3e21f3756 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_marshaler.go @@ -0,0 +1,225 @@ +package jsoniter + +import ( + "encoding" + "encoding/json" + "unsafe" + + "github.com/modern-go/reflect2" +) + +var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem() +var unmarshalerType = reflect2.TypeOfPtr((*json.Unmarshaler)(nil)).Elem() +var textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem() +var textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem() + +func createDecoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValDecoder { + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(unmarshalerType) { + return &referenceDecoder{ + &unmarshalerDecoder{ptrType}, + } + } + if ptrType.Implements(textUnmarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ptrType}, + } + } + return nil +} + +func createEncoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == marshalerType { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &directMarshalerEncoder{ + checkIsEmpty: checkIsEmpty, + } + return encoder + } + if typ.Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &marshalerEncoder{ + valType: typ, + checkIsEmpty: checkIsEmpty, + } + return encoder + } + ptrType := reflect2.PtrTo(typ) + if ctx.prefix != "" && ptrType.Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, ptrType) + var encoder ValEncoder = &marshalerEncoder{ + valType: ptrType, + checkIsEmpty: checkIsEmpty, + } + return &referenceEncoder{encoder} + } + if typ == textMarshalerType { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &directTextMarshalerEncoder{ + checkIsEmpty: checkIsEmpty, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + return encoder + } + if typ.Implements(textMarshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + checkIsEmpty: checkIsEmpty, + } + return encoder + } + // if prefix is empty, the type is the root type + if ctx.prefix != "" && ptrType.Implements(textMarshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, ptrType) + var encoder ValEncoder = &textMarshalerEncoder{ + valType: ptrType, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + checkIsEmpty: checkIsEmpty, + } + return &referenceEncoder{encoder} + } + return nil +} + +type marshalerEncoder struct { + checkIsEmpty checkIsEmpty + valType reflect2.Type +} + +func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := 
encoder.valType.UnsafeIndirect(ptr) + if encoder.valType.IsNullable() && reflect2.IsNil(obj) { + stream.WriteNil() + return + } + marshaler := obj.(json.Marshaler) + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + // html escape was already done by jsoniter + // but the extra '\n' should be trimed + l := len(bytes) + if l > 0 && bytes[l-1] == '\n' { + bytes = bytes[:l-1] + } + stream.Write(bytes) + } +} + +func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type directMarshalerEncoder struct { + checkIsEmpty checkIsEmpty +} + +func (encoder *directMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + marshaler := *(*json.Marshaler)(ptr) + if marshaler == nil { + stream.WriteNil() + return + } + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + stream.Write(bytes) + } +} + +func (encoder *directMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type textMarshalerEncoder struct { + valType reflect2.Type + stringEncoder ValEncoder + checkIsEmpty checkIsEmpty +} + +func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + if encoder.valType.IsNullable() && reflect2.IsNil(obj) { + stream.WriteNil() + return + } + marshaler := (obj).(encoding.TextMarshaler) + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + str := string(bytes) + encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) + } +} + +func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type directTextMarshalerEncoder struct { + stringEncoder ValEncoder + checkIsEmpty checkIsEmpty +} + +func (encoder *directTextMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + marshaler := *(*encoding.TextMarshaler)(ptr) + if marshaler == nil { + stream.WriteNil() + return + } + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + str := string(bytes) + encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) + } +} + +func (encoder *directTextMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type unmarshalerDecoder struct { + valType reflect2.Type +} + +func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valType := decoder.valType + obj := valType.UnsafeIndirect(ptr) + unmarshaler := obj.(json.Unmarshaler) + iter.nextToken() + iter.unreadByte() // skip spaces + bytes := iter.SkipAndReturnBytes() + err := unmarshaler.UnmarshalJSON(bytes) + if err != nil { + iter.ReportError("unmarshalerDecoder", err.Error()) + } +} + +type textUnmarshalerDecoder struct { + valType reflect2.Type +} + +func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valType := decoder.valType + obj := valType.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + ptrType := valType.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + elem := elemType.UnsafeNew() + ptrType.UnsafeSet(ptr, unsafe.Pointer(&elem)) + obj = valType.UnsafeIndirect(ptr) + } + unmarshaler := (obj).(encoding.TextUnmarshaler) + str := iter.ReadString() + err := unmarshaler.UnmarshalText([]byte(str)) + if err != nil { + iter.ReportError("textUnmarshalerDecoder", err.Error()) + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_native.go 
b/vendor/github.com/json-iterator/go/reflect_native.go new file mode 100644 index 000000000..f88722d14 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_native.go @@ -0,0 +1,453 @@ +package jsoniter + +import ( + "encoding/base64" + "reflect" + "strconv" + "unsafe" + + "github.com/modern-go/reflect2" +) + +const ptrSize = 32 << uintptr(^uintptr(0)>>63) + +func createEncoderOfNative(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { + sliceDecoder := decoderOfSlice(ctx, typ) + return &base64Codec{sliceDecoder: sliceDecoder} + } + typeName := typ.String() + kind := typ.Kind() + switch kind { + case reflect.String: + if typeName != "string" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &int32Codec{} + } + return &int64Codec{} + case reflect.Int8: + if typeName != "int8" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint8: + if typeName != "uint8" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) + } + if ptrSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint64: + if typeName != "uint64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) + } + return &boolCodec{} + } + return nil +} + +func createDecoderOfNative(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { + sliceDecoder := decoderOfSlice(ctx, typ) + return &base64Codec{sliceDecoder: sliceDecoder} + } + typeName := typ.String() + switch typ.Kind() { + case reflect.String: + if typeName != "string" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) + } + return &stringCodec{} + 
case reflect.Int: + if typeName != "int" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &int32Codec{} + } + return &int64Codec{} + case reflect.Int8: + if typeName != "int8" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint8: + if typeName != "uint8" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) + } + if ptrSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint64: + if typeName != "uint64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) + } + return &boolCodec{} + } + return nil +} + +type stringCodec struct { +} + +func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*string)(ptr)) = iter.ReadString() +} + +func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteString(str) +} + +func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +type int8Codec struct { +} + +func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int8)(ptr)) = iter.ReadInt8() + } +} + +func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt8(*((*int8)(ptr))) +} + +func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int8)(ptr)) == 0 +} + +type int16Codec struct { +} + +func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int16)(ptr)) = iter.ReadInt16() + } +} + +func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt16(*((*int16)(ptr))) +} + +func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int16)(ptr)) == 0 +} + +type int32Codec struct { +} + +func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int32)(ptr)) = iter.ReadInt32() + } +} + 
+func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt32(*((*int32)(ptr))) +} + +func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int32)(ptr)) == 0 +} + +type int64Codec struct { +} + +func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int64)(ptr)) = iter.ReadInt64() + } +} + +func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt64(*((*int64)(ptr))) +} + +func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int64)(ptr)) == 0 +} + +type uint8Codec struct { +} + +func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint8)(ptr)) = iter.ReadUint8() + } +} + +func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint8(*((*uint8)(ptr))) +} + +func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint8)(ptr)) == 0 +} + +type uint16Codec struct { +} + +func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint16)(ptr)) = iter.ReadUint16() + } +} + +func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint16(*((*uint16)(ptr))) +} + +func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint16)(ptr)) == 0 +} + +type uint32Codec struct { +} + +func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint32)(ptr)) = iter.ReadUint32() + } +} + +func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint32(*((*uint32)(ptr))) +} + +func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint32)(ptr)) == 0 +} + +type uint64Codec struct { +} + +func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint64)(ptr)) = iter.ReadUint64() + } +} + +func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint64(*((*uint64)(ptr))) +} + +func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint64)(ptr)) == 0 +} + +type float32Codec struct { +} + +func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float32)(ptr)) = iter.ReadFloat32() + } +} + +func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32(*((*float32)(ptr))) +} + +func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type float64Codec struct { +} + +func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float64)(ptr)) = iter.ReadFloat64() + } +} + +func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64(*((*float64)(ptr))) +} + +func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +type boolCodec struct { +} + +func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*bool)(ptr)) = iter.ReadBool() + } +} + +func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteBool(*((*bool)(ptr))) +} + +func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool { + return !(*((*bool)(ptr))) +} + +type base64Codec struct { + sliceType *reflect2.UnsafeSliceType + sliceDecoder ValDecoder +} + +func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + codec.sliceType.UnsafeSetNil(ptr) + 
return + } + switch iter.WhatIsNext() { + case StringValue: + src := iter.ReadString() + dst, err := base64.StdEncoding.DecodeString(src) + if err != nil { + iter.ReportError("decode base64", err.Error()) + } else { + codec.sliceType.UnsafeSet(ptr, unsafe.Pointer(&dst)) + } + case ArrayValue: + codec.sliceDecoder.Decode(ptr, iter) + default: + iter.ReportError("base64Codec", "invalid input") + } +} + +func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + if codec.sliceType.UnsafeIsNil(ptr) { + stream.WriteNil() + return + } + src := *((*[]byte)(ptr)) + encoding := base64.StdEncoding + stream.writeByte('"') + if len(src) != 0 { + size := encoding.EncodedLen(len(src)) + buf := make([]byte, size) + encoding.Encode(buf, src) + stream.buf = append(stream.buf, buf...) + } + stream.writeByte('"') +} + +func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*[]byte)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go new file mode 100644 index 000000000..fa71f4748 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_optional.go @@ -0,0 +1,129 @@ +package jsoniter + +import ( + "github.com/modern-go/reflect2" + "unsafe" +) + +func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder { + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + decoder := decoderOfType(ctx, elemType) + return &OptionalDecoder{elemType, decoder} +} + +func encoderOfOptional(ctx *ctx, typ reflect2.Type) ValEncoder { + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + elemEncoder := encoderOfType(ctx, elemType) + encoder := &OptionalEncoder{elemEncoder} + return encoder +} + +type OptionalDecoder struct { + ValueType reflect2.Type + ValueDecoder ValDecoder +} + +func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + *((*unsafe.Pointer)(ptr)) = nil + } else { + if *((*unsafe.Pointer)(ptr)) == nil { + //pointer to null, we have to allocate memory to hold the value + newPtr := decoder.ValueType.UnsafeNew() + decoder.ValueDecoder.Decode(newPtr, iter) + *((*unsafe.Pointer)(ptr)) = newPtr + } else { + //reuse existing instance + decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } + } +} + +type dereferenceDecoder struct { + // only to deference a pointer + valueType reflect2.Type + valueDecoder ValDecoder +} + +func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if *((*unsafe.Pointer)(ptr)) == nil { + //pointer to null, we have to allocate memory to hold the value + newPtr := decoder.valueType.UnsafeNew() + decoder.valueDecoder.Decode(newPtr, iter) + *((*unsafe.Pointer)(ptr)) = newPtr + } else { + //reuse existing instance + decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } +} + +type OptionalEncoder struct { + ValueEncoder ValEncoder +} + +func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*unsafe.Pointer)(ptr)) == nil +} + +type dereferenceEncoder struct { + ValueEncoder ValEncoder +} + +func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), 
stream) + } +} + +func (encoder *dereferenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + dePtr := *((*unsafe.Pointer)(ptr)) + if dePtr == nil { + return true + } + return encoder.ValueEncoder.IsEmpty(dePtr) +} + +func (encoder *dereferenceEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { + deReferenced := *((*unsafe.Pointer)(ptr)) + if deReferenced == nil { + return true + } + isEmbeddedPtrNil, converted := encoder.ValueEncoder.(IsEmbeddedPtrNil) + if !converted { + return false + } + fieldPtr := unsafe.Pointer(deReferenced) + return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) +} + +type referenceEncoder struct { + encoder ValEncoder +} + +func (encoder *referenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) +} + +func (encoder *referenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) +} + +type referenceDecoder struct { + decoder ValDecoder +} + +func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.decoder.Decode(unsafe.Pointer(&ptr), iter) +} diff --git a/vendor/github.com/json-iterator/go/reflect_slice.go b/vendor/github.com/json-iterator/go/reflect_slice.go new file mode 100644 index 000000000..9441d79df --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_slice.go @@ -0,0 +1,99 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "unsafe" +) + +func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder { + sliceType := typ.(*reflect2.UnsafeSliceType) + decoder := decoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) + return &sliceDecoder{sliceType, decoder} +} + +func encoderOfSlice(ctx *ctx, typ reflect2.Type) ValEncoder { + sliceType := typ.(*reflect2.UnsafeSliceType) + encoder := encoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) + return &sliceEncoder{sliceType, encoder} +} + +type sliceEncoder struct { + sliceType *reflect2.UnsafeSliceType + elemEncoder ValEncoder +} + +func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if encoder.sliceType.UnsafeIsNil(ptr) { + stream.WriteNil() + return + } + length := encoder.sliceType.UnsafeLengthOf(ptr) + if length == 0 { + stream.WriteEmptyArray() + return + } + stream.WriteArrayStart() + encoder.elemEncoder.Encode(encoder.sliceType.UnsafeGetIndex(ptr, 0), stream) + for i := 1; i < length; i++ { + stream.WriteMore() + elemPtr := encoder.sliceType.UnsafeGetIndex(ptr, i) + encoder.elemEncoder.Encode(elemPtr, stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error()) + } +} + +func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.sliceType.UnsafeLengthOf(ptr) == 0 +} + +type sliceDecoder struct { + sliceType *reflect2.UnsafeSliceType + elemDecoder ValDecoder +} + +func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error()) + } +} + +func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + sliceType := decoder.sliceType + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + sliceType.UnsafeSetNil(ptr) + return + } + if c != '[' { + iter.ReportError("decode slice", "expect [ or n, but found "+string([]byte{c})) + return + } + c = iter.nextToken() + if c == ']' { + 
sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0)) + return + } + iter.unreadByte() + sliceType.UnsafeGrow(ptr, 1) + elemPtr := sliceType.UnsafeGetIndex(ptr, 0) + decoder.elemDecoder.Decode(elemPtr, iter) + length := 1 + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + idx := length + length += 1 + sliceType.UnsafeGrow(ptr, length) + elemPtr = sliceType.UnsafeGetIndex(ptr, idx) + decoder.elemDecoder.Decode(elemPtr, iter) + } + if c != ']' { + iter.ReportError("decode slice", "expect ], but found "+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go new file mode 100644 index 000000000..d7eb0eb5c --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -0,0 +1,1092 @@ +package jsoniter + +import ( + "fmt" + "io" + "strings" + "unsafe" + + "github.com/modern-go/reflect2" +) + +func decoderOfStruct(ctx *ctx, typ reflect2.Type) ValDecoder { + bindings := map[string]*Binding{} + structDescriptor := describeStruct(ctx, typ) + for _, binding := range structDescriptor.Fields { + for _, fromName := range binding.FromNames { + old := bindings[fromName] + if old == nil { + bindings[fromName] = binding + continue + } + ignoreOld, ignoreNew := resolveConflictBinding(ctx.frozenConfig, old, binding) + if ignoreOld { + delete(bindings, fromName) + } + if !ignoreNew { + bindings[fromName] = binding + } + } + } + fields := map[string]*structFieldDecoder{} + for k, binding := range bindings { + fields[k] = binding.Decoder.(*structFieldDecoder) + } + + if !ctx.caseSensitive() { + for k, binding := range bindings { + if _, found := fields[strings.ToLower(k)]; !found { + fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder) + } + } + } + + return createStructDecoder(ctx, typ, fields) +} + +func createStructDecoder(ctx *ctx, typ reflect2.Type, fields map[string]*structFieldDecoder) ValDecoder { + if ctx.disallowUnknownFields { + return &generalStructDecoder{typ: typ, fields: fields, disallowUnknownFields: true} + } + knownHash := map[int64]struct{}{ + 0: {}, + } + + switch len(fields) { + case 0: + return &skipObjectDecoder{typ} + case 1: + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder} + } + case 2: + var fieldHash1 int64 + var fieldHash2 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldHash1 == 0 { + fieldHash1 = fieldHash + fieldDecoder1 = fieldDecoder + } else { + fieldHash2 = fieldHash + fieldDecoder2 = fieldDecoder + } + } + return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2} + case 3: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, 
fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } + } + return &threeFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3} + case 4: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } + } + return &fourFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4} + case 5: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } + } + return &fiveFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5} + case 6: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if 
fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } + } + return &sixFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6} + case 7: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } + } + return &sevenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7} + case 8: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } + } + return &eightFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + 
fieldName8, fieldDecoder8} + case 9: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldName9 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } + } + return &nineFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8, + fieldName9, fieldDecoder9} + case 10: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldName9 int64 + var fieldName10 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + var fieldDecoder10 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else if fieldName9 == 0 { + fieldName9 = fieldHash + fieldDecoder9 = 
fieldDecoder + } else { + fieldName10 = fieldHash + fieldDecoder10 = fieldDecoder + } + } + return &tenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8, + fieldName9, fieldDecoder9, + fieldName10, fieldDecoder10} + } + return &generalStructDecoder{typ, fields, false} +} + +type generalStructDecoder struct { + typ reflect2.Type + fields map[string]*structFieldDecoder + disallowUnknownFields bool +} + +func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + var c byte + for c = ','; c == ','; c = iter.nextToken() { + decoder.decodeOneField(ptr, iter) + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + if c != '}' { + iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c})) + } + iter.decrementDepth() +} + +func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) { + var field string + var fieldDecoder *structFieldDecoder + if iter.cfg.objectFieldMustBeSimpleString { + fieldBytes := iter.ReadStringAsSlice() + field = *(*string)(unsafe.Pointer(&fieldBytes)) + fieldDecoder = decoder.fields[field] + if fieldDecoder == nil && !iter.cfg.caseSensitive { + fieldDecoder = decoder.fields[strings.ToLower(field)] + } + } else { + field = iter.ReadString() + fieldDecoder = decoder.fields[field] + if fieldDecoder == nil && !iter.cfg.caseSensitive { + fieldDecoder = decoder.fields[strings.ToLower(field)] + } + } + if fieldDecoder == nil { + if decoder.disallowUnknownFields { + msg := "found unknown field: " + field + iter.ReportError("ReadObject", msg) + } + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + iter.Skip() + return + } + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + fieldDecoder.Decode(ptr, iter) +} + +type skipObjectDecoder struct { + typ reflect2.Type +} + +func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valueType := iter.WhatIsNext() + if valueType != ObjectValue && valueType != NilValue { + iter.ReportError("skipObjectDecoder", "expect object or null") + return + } + iter.Skip() +} + +type oneFieldStructDecoder struct { + typ reflect2.Type + fieldHash int64 + fieldDecoder *structFieldDecoder +} + +func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + if iter.readFieldHash() == decoder.fieldHash { + decoder.fieldDecoder.Decode(ptr, iter) + } else { + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type twoFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder +} + +func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + 
return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type threeFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder +} + +func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type fourFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder +} + +func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type fiveFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder +} + +func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, 
iter.Error.Error()) + } + iter.decrementDepth() +} + +type sixFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder +} + +func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type sevenFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder +} + +func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type eightFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder +} + +func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case 
decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type nineFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder + fieldHash9 int64 + fieldDecoder9 *structFieldDecoder +} + +func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + case decoder.fieldHash9: + decoder.fieldDecoder9.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type tenFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder + fieldHash9 int64 + fieldDecoder9 *structFieldDecoder + fieldHash10 int64 + fieldDecoder10 *structFieldDecoder +} + +func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + 
decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + case decoder.fieldHash9: + decoder.fieldDecoder9.Decode(ptr, iter) + case decoder.fieldHash10: + decoder.fieldDecoder10.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type structFieldDecoder struct { + field reflect2.StructField + fieldDecoder ValDecoder +} + +func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + fieldPtr := decoder.field.UnsafeGet(ptr) + decoder.fieldDecoder.Decode(fieldPtr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%s: %s", decoder.field.Name(), iter.Error.Error()) + } +} + +type stringModeStringDecoder struct { + elemDecoder ValDecoder + cfg *frozenConfig +} + +func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.elemDecoder.Decode(ptr, iter) + str := *((*string)(ptr)) + tempIter := decoder.cfg.BorrowIterator([]byte(str)) + defer decoder.cfg.ReturnIterator(tempIter) + *((*string)(ptr)) = tempIter.ReadString() +} + +type stringModeNumberDecoder struct { + elemDecoder ValDecoder +} + +func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return + } + decoder.elemDecoder.Decode(ptr, iter) + if iter.Error != nil { + return + } + c = iter.readByte() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go new file mode 100644 index 000000000..152e3ef5a --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go @@ -0,0 +1,211 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "unsafe" +) + +func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder { + type bindingTo struct { + binding *Binding + toName string + ignored bool + } + orderedBindings := []*bindingTo{} + structDescriptor := describeStruct(ctx, typ) + for _, binding := range structDescriptor.Fields { + for _, toName := range binding.ToNames { + new := &bindingTo{ + binding: binding, + toName: toName, + } + for _, old := range orderedBindings { + if old.toName != toName { + continue + } + old.ignored, new.ignored = resolveConflictBinding(ctx.frozenConfig, old.binding, new.binding) + } + orderedBindings = append(orderedBindings, new) + } + } + if len(orderedBindings) == 0 { + return &emptyStructEncoder{} + } + finalOrderedFields := []structFieldTo{} + for _, bindingTo := range orderedBindings { + if !bindingTo.ignored { + finalOrderedFields = append(finalOrderedFields, structFieldTo{ + encoder: bindingTo.binding.Encoder.(*structFieldEncoder), + toName: bindingTo.toName, + }) + } + } + return &structEncoder{typ, finalOrderedFields} +} + +func createCheckIsEmpty(ctx *ctx, typ reflect2.Type) checkIsEmpty { + encoder := createEncoderOfNative(ctx, typ) + if encoder != nil { + return encoder + } + kind := typ.Kind() + 
switch kind { + case reflect.Interface: + return &dynamicEncoder{typ} + case reflect.Struct: + return &structEncoder{typ: typ} + case reflect.Array: + return &arrayEncoder{} + case reflect.Slice: + return &sliceEncoder{} + case reflect.Map: + return encoderOfMap(ctx, typ) + case reflect.Ptr: + return &OptionalEncoder{} + default: + return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)} + } +} + +func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) { + newTagged := new.Field.Tag().Get(cfg.getTagKey()) != "" + oldTagged := old.Field.Tag().Get(cfg.getTagKey()) != "" + if newTagged { + if oldTagged { + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } else { + return true, false + } + } else { + if oldTagged { + return true, false + } + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } +} + +type structFieldEncoder struct { + field reflect2.StructField + fieldEncoder ValEncoder + omitempty bool +} + +func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + fieldPtr := encoder.field.UnsafeGet(ptr) + encoder.fieldEncoder.Encode(fieldPtr, stream) + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%s: %s", encoder.field.Name(), stream.Error.Error()) + } +} + +func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool { + fieldPtr := encoder.field.UnsafeGet(ptr) + return encoder.fieldEncoder.IsEmpty(fieldPtr) +} + +func (encoder *structFieldEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { + isEmbeddedPtrNil, converted := encoder.fieldEncoder.(IsEmbeddedPtrNil) + if !converted { + return false + } + fieldPtr := encoder.field.UnsafeGet(ptr) + return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) +} + +type IsEmbeddedPtrNil interface { + IsEmbeddedPtrNil(ptr unsafe.Pointer) bool +} + +type structEncoder struct { + typ reflect2.Type + fields []structFieldTo +} + +type structFieldTo struct { + encoder *structFieldEncoder + toName string +} + +func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteObjectStart() + isNotFirst := false + for _, field := range encoder.fields { + if field.encoder.omitempty && field.encoder.IsEmpty(ptr) { + continue + } + if field.encoder.IsEmbeddedPtrNil(ptr) { + continue + } + if isNotFirst { + stream.WriteMore() + } + stream.WriteObjectField(field.toName) + field.encoder.Encode(ptr, stream) + isNotFirst = true + } + stream.WriteObjectEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error()) + } +} + +func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type emptyStructEncoder struct { +} + +func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyObject() +} + +func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type stringModeNumberEncoder struct { + elemEncoder ValEncoder +} + +func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.elemEncoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} + +type 
stringModeStringEncoder struct { + elemEncoder ValEncoder + cfg *frozenConfig +} + +func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + tempStream := encoder.cfg.BorrowStream(nil) + tempStream.Attachment = stream.Attachment + defer encoder.cfg.ReturnStream(tempStream) + encoder.elemEncoder.Encode(ptr, tempStream) + stream.WriteString(string(tempStream.Buffer())) +} + +func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} diff --git a/vendor/github.com/json-iterator/go/stream.go b/vendor/github.com/json-iterator/go/stream.go new file mode 100644 index 000000000..23d8a3ad6 --- /dev/null +++ b/vendor/github.com/json-iterator/go/stream.go @@ -0,0 +1,210 @@ +package jsoniter + +import ( + "io" +) + +// stream is a io.Writer like object, with JSON specific write functions. +// Error is not returned as return value, but stored as Error member on this stream instance. +type Stream struct { + cfg *frozenConfig + out io.Writer + buf []byte + Error error + indention int + Attachment interface{} // open for customized encoder +} + +// NewStream create new stream instance. +// cfg can be jsoniter.ConfigDefault. +// out can be nil if write to internal buffer. +// bufSize is the initial size for the internal buffer in bytes. +func NewStream(cfg API, out io.Writer, bufSize int) *Stream { + return &Stream{ + cfg: cfg.(*frozenConfig), + out: out, + buf: make([]byte, 0, bufSize), + Error: nil, + indention: 0, + } +} + +// Pool returns a pool can provide more stream with same configuration +func (stream *Stream) Pool() StreamPool { + return stream.cfg +} + +// Reset reuse this stream instance by assign a new writer +func (stream *Stream) Reset(out io.Writer) { + stream.out = out + stream.buf = stream.buf[:0] +} + +// Available returns how many bytes are unused in the buffer. +func (stream *Stream) Available() int { + return cap(stream.buf) - len(stream.buf) +} + +// Buffered returns the number of bytes that have been written into the current buffer. +func (stream *Stream) Buffered() int { + return len(stream.buf) +} + +// Buffer if writer is nil, use this method to take the result +func (stream *Stream) Buffer() []byte { + return stream.buf +} + +// SetBuffer allows to append to the internal buffer directly +func (stream *Stream) SetBuffer(buf []byte) { + stream.buf = buf +} + +// Write writes the contents of p into the buffer. +// It returns the number of bytes written. +// If nn < len(p), it also returns an error explaining +// why the write is short. +func (stream *Stream) Write(p []byte) (nn int, err error) { + stream.buf = append(stream.buf, p...) + if stream.out != nil { + nn, err = stream.out.Write(stream.buf) + stream.buf = stream.buf[nn:] + return + } + return len(p), nil +} + +// WriteByte writes a single byte. +func (stream *Stream) writeByte(c byte) { + stream.buf = append(stream.buf, c) +} + +func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) { + stream.buf = append(stream.buf, c1, c2) +} + +func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) { + stream.buf = append(stream.buf, c1, c2, c3) +} + +func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) { + stream.buf = append(stream.buf, c1, c2, c3, c4) +} + +func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) { + stream.buf = append(stream.buf, c1, c2, c3, c4, c5) +} + +// Flush writes any buffered data to the underlying io.Writer. 
+func (stream *Stream) Flush() error { + if stream.out == nil { + return nil + } + if stream.Error != nil { + return stream.Error + } + _, err := stream.out.Write(stream.buf) + if err != nil { + if stream.Error == nil { + stream.Error = err + } + return err + } + stream.buf = stream.buf[:0] + return nil +} + +// WriteRaw write string out without quotes, just like []byte +func (stream *Stream) WriteRaw(s string) { + stream.buf = append(stream.buf, s...) +} + +// WriteNil write null to stream +func (stream *Stream) WriteNil() { + stream.writeFourBytes('n', 'u', 'l', 'l') +} + +// WriteTrue write true to stream +func (stream *Stream) WriteTrue() { + stream.writeFourBytes('t', 'r', 'u', 'e') +} + +// WriteFalse write false to stream +func (stream *Stream) WriteFalse() { + stream.writeFiveBytes('f', 'a', 'l', 's', 'e') +} + +// WriteBool write true or false into stream +func (stream *Stream) WriteBool(val bool) { + if val { + stream.WriteTrue() + } else { + stream.WriteFalse() + } +} + +// WriteObjectStart write { with possible indention +func (stream *Stream) WriteObjectStart() { + stream.indention += stream.cfg.indentionStep + stream.writeByte('{') + stream.writeIndention(0) +} + +// WriteObjectField write "field": with possible indention +func (stream *Stream) WriteObjectField(field string) { + stream.WriteString(field) + if stream.indention > 0 { + stream.writeTwoBytes(':', ' ') + } else { + stream.writeByte(':') + } +} + +// WriteObjectEnd write } with possible indention +func (stream *Stream) WriteObjectEnd() { + stream.writeIndention(stream.cfg.indentionStep) + stream.indention -= stream.cfg.indentionStep + stream.writeByte('}') +} + +// WriteEmptyObject write {} +func (stream *Stream) WriteEmptyObject() { + stream.writeByte('{') + stream.writeByte('}') +} + +// WriteMore write , with possible indention +func (stream *Stream) WriteMore() { + stream.writeByte(',') + stream.writeIndention(0) +} + +// WriteArrayStart write [ with possible indention +func (stream *Stream) WriteArrayStart() { + stream.indention += stream.cfg.indentionStep + stream.writeByte('[') + stream.writeIndention(0) +} + +// WriteEmptyArray write [] +func (stream *Stream) WriteEmptyArray() { + stream.writeTwoBytes('[', ']') +} + +// WriteArrayEnd write ] with possible indention +func (stream *Stream) WriteArrayEnd() { + stream.writeIndention(stream.cfg.indentionStep) + stream.indention -= stream.cfg.indentionStep + stream.writeByte(']') +} + +func (stream *Stream) writeIndention(delta int) { + if stream.indention == 0 { + return + } + stream.writeByte('\n') + toWrite := stream.indention - delta + for i := 0; i < toWrite; i++ { + stream.buf = append(stream.buf, ' ') + } +} diff --git a/vendor/github.com/json-iterator/go/stream_float.go b/vendor/github.com/json-iterator/go/stream_float.go new file mode 100644 index 000000000..826aa594a --- /dev/null +++ b/vendor/github.com/json-iterator/go/stream_float.go @@ -0,0 +1,111 @@ +package jsoniter + +import ( + "fmt" + "math" + "strconv" +) + +var pow10 []uint64 + +func init() { + pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000} +} + +// WriteFloat32 write float32 to stream +func (stream *Stream) WriteFloat32(val float32) { + if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } + abs := math.Abs(float64(val)) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. 
+ if abs != 0 { + if float32(abs) < 1e-6 || float32(abs) >= 1e21 { + fmt = 'e' + } + } + stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 32) +} + +// WriteFloat32Lossy write float32 to stream with ONLY 6 digits precision although much much faster +func (stream *Stream) WriteFloat32Lossy(val float32) { + if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } + if val < 0 { + stream.writeByte('-') + val = -val + } + if val > 0x4ffffff { + stream.WriteFloat32(val) + return + } + precision := 6 + exp := uint64(1000000) // 6 + lval := uint64(float64(val)*float64(exp) + 0.5) + stream.WriteUint64(lval / exp) + fval := lval % exp + if fval == 0 { + return + } + stream.writeByte('.') + for p := precision - 1; p > 0 && fval < pow10[p]; p-- { + stream.writeByte('0') + } + stream.WriteUint64(fval) + for stream.buf[len(stream.buf)-1] == '0' { + stream.buf = stream.buf[:len(stream.buf)-1] + } +} + +// WriteFloat64 write float64 to stream +func (stream *Stream) WriteFloat64(val float64) { + if math.IsInf(val, 0) || math.IsNaN(val) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } + abs := math.Abs(val) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. + if abs != 0 { + if abs < 1e-6 || abs >= 1e21 { + fmt = 'e' + } + } + stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 64) +} + +// WriteFloat64Lossy write float64 to stream with ONLY 6 digits precision although much much faster +func (stream *Stream) WriteFloat64Lossy(val float64) { + if math.IsInf(val, 0) || math.IsNaN(val) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } + if val < 0 { + stream.writeByte('-') + val = -val + } + if val > 0x4ffffff { + stream.WriteFloat64(val) + return + } + precision := 6 + exp := uint64(1000000) // 6 + lval := uint64(val*float64(exp) + 0.5) + stream.WriteUint64(lval / exp) + fval := lval % exp + if fval == 0 { + return + } + stream.writeByte('.') + for p := precision - 1; p > 0 && fval < pow10[p]; p-- { + stream.writeByte('0') + } + stream.WriteUint64(fval) + for stream.buf[len(stream.buf)-1] == '0' { + stream.buf = stream.buf[:len(stream.buf)-1] + } +} diff --git a/vendor/github.com/json-iterator/go/stream_int.go b/vendor/github.com/json-iterator/go/stream_int.go new file mode 100644 index 000000000..d1059ee4c --- /dev/null +++ b/vendor/github.com/json-iterator/go/stream_int.go @@ -0,0 +1,190 @@ +package jsoniter + +var digits []uint32 + +func init() { + digits = make([]uint32, 1000) + for i := uint32(0); i < 1000; i++ { + digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0' + if i < 10 { + digits[i] += 2 << 24 + } else if i < 100 { + digits[i] += 1 << 24 + } + } +} + +func writeFirstBuf(space []byte, v uint32) []byte { + start := v >> 24 + if start == 0 { + space = append(space, byte(v>>16), byte(v>>8)) + } else if start == 1 { + space = append(space, byte(v>>8)) + } + space = append(space, byte(v)) + return space +} + +func writeBuf(buf []byte, v uint32) []byte { + return append(buf, byte(v>>16), byte(v>>8), byte(v)) +} + +// WriteUint8 write uint8 to stream +func (stream *Stream) WriteUint8(val uint8) { + stream.buf = writeFirstBuf(stream.buf, digits[val]) +} + +// WriteInt8 write int8 to stream +func (stream *Stream) WriteInt8(nval int8) { + var val uint8 + if nval < 0 { + val = uint8(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = 
uint8(nval) + } + stream.buf = writeFirstBuf(stream.buf, digits[val]) +} + +// WriteUint16 write uint16 to stream +func (stream *Stream) WriteUint16(val uint16) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return +} + +// WriteInt16 write int16 to stream +func (stream *Stream) WriteInt16(nval int16) { + var val uint16 + if nval < 0 { + val = uint16(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint16(nval) + } + stream.WriteUint16(val) +} + +// WriteUint32 write uint32 to stream +func (stream *Stream) WriteUint32(val uint32) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q2]) + } else { + r3 := q2 - q3*1000 + stream.buf = append(stream.buf, byte(q3+'0')) + stream.buf = writeBuf(stream.buf, digits[r3]) + } + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) +} + +// WriteInt32 write int32 to stream +func (stream *Stream) WriteInt32(nval int32) { + var val uint32 + if nval < 0 { + val = uint32(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint32(nval) + } + stream.WriteUint32(val) +} + +// WriteUint64 write uint64 to stream +func (stream *Stream) WriteUint64(val uint64) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q2]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r3 := q2 - q3*1000 + q4 := q3 / 1000 + if q4 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q3]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r4 := q3 - q4*1000 + q5 := q4 / 1000 + if q5 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q4]) + stream.buf = writeBuf(stream.buf, digits[r4]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r5 := q4 - q5*1000 + q6 := q5 / 1000 + if q6 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q5]) + } else { + stream.buf = writeFirstBuf(stream.buf, digits[q6]) + r6 := q5 - q6*1000 + stream.buf = writeBuf(stream.buf, digits[r6]) + } + stream.buf = writeBuf(stream.buf, digits[r5]) + stream.buf = writeBuf(stream.buf, digits[r4]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) +} + +// WriteInt64 write int64 to stream +func (stream *Stream) WriteInt64(nval int64) { + var val uint64 + if nval < 0 { + val = uint64(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint64(nval) + } + stream.WriteUint64(val) +} + +// WriteInt write int to stream +func (stream *Stream) WriteInt(val int) { + 
stream.WriteInt64(int64(val)) +} + +// WriteUint write uint to stream +func (stream *Stream) WriteUint(val uint) { + stream.WriteUint64(uint64(val)) +} diff --git a/vendor/github.com/json-iterator/go/stream_str.go b/vendor/github.com/json-iterator/go/stream_str.go new file mode 100644 index 000000000..54c2ba0b3 --- /dev/null +++ b/vendor/github.com/json-iterator/go/stream_str.go @@ -0,0 +1,372 @@ +package jsoniter + +import ( + "unicode/utf8" +) + +// htmlSafeSet holds the value true if the ASCII character with the given +// array position can be safely represented inside a JSON string, embedded +// inside of HTML